diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 0000000000..17018f14af
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1 @@
+* @VaibhavPage @whynowy
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000000..5072904bd9
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,18 @@
+version: 2
+updates:
+  - package-ecosystem: "gomod"
+    directory: "/"
+    schedule:
+      interval: "weekly"
+      day: "saturday"
+    ignore:
+      - dependency-name: k8s.io/*
+      - dependency-name: sigs.k8s.io/controller-runtime
+      - dependency-name: github.com/aws/aws-sdk-go # too often, and usually not related.
+    open-pull-requests-limit: 10
+
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "weekly"
+      day: "saturday"
diff --git a/.github/workflows/changelog.yaml b/.github/workflows/changelog.yaml
new file mode 100644
index 0000000000..8c775b66c4
--- /dev/null
+++ b/.github/workflows/changelog.yaml
@@ -0,0 +1,34 @@
+name: Changelog
+
+on:
+  push:
+    tags:
+      - v*
+      - "!v.0.9"
+
+permissions:
+  contents: read
+
+jobs:
+  generate_changelog:
+    permissions:
+      contents: write # for peter-evans/create-pull-request to create branch
+      pull-requests: write # for peter-evans/create-pull-request to create a PR
+    if: github.repository == 'argoproj/argo-events'
+    runs-on: ubuntu-latest
+    name: Generate changelog
+    steps:
+      - uses: actions/checkout@v4
+        with:
+          ref: master
+          fetch-depth: 0
+      - run: git fetch --prune --prune-tags
+      - run: git tag -l 'v*'
+      # avoid invoking `make` to reduce the risk of a Makefile bug failing this workflow
+      - run: ./hack/changelog.sh > CHANGELOG.md
+      - uses: peter-evans/create-pull-request@v6
+        with:
+          title: 'docs: updated CHANGELOG.md'
+          commit-message: 'docs: updated CHANGELOG.md'
+          signoff: true
+
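hack/changelog.sh itself is not part of this diff; only the workflow that invokes it is. A minimal sketch of what such a generator might do, assuming it walks the v* tags newest-first and emits one linked bullet per commit between consecutive tags (the script name comes from the workflow above; everything else here is an assumption, not the real script):

#!/bin/bash
# Hypothetical changelog-generator sketch -- not the actual hack/changelog.sh.
set -eu
echo '# Changelog'
prev=''
for tag in $(git tag -l 'v*' --sort=-v:refname); do
  if [ -n "$prev" ]; then
    echo
    echo "## $prev ($(git log -1 --format=%ad --date=short "$prev"))"
    echo
    # One bullet per commit in the range, linked to the commit on GitHub.
    git log --format=' * [%h](https://github.com/argoproj/argo-events/commit/%H) %s' "$tag..$prev"
  fi
  prev=$tag
done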
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 61fcbb339a..8149cbfb94 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -5,33 +5,40 @@ on:
       - "master"
       - "release-*"
   pull_request:
-    branches: [ master ]
+    branches:
+      - "master"
+      - "release-*"
+
+permissions:
+  contents: read
+
 jobs:
   codegen:
     name: Codegen
     runs-on: ubuntu-latest
-    timeout-minutes: 5
+    timeout-minutes: 7
     steps:
       - name: Checkout code
-        uses: actions/checkout@v2
-      - name: Restore go build cache
-        uses: actions/cache@v1
-        with:
-          path: ~/.cache/go-build
-          key: ${{ runner.os }}-go-build-v1-${{ hashFiles('**/go.mod') }}
+        uses: actions/checkout@v4
       - name: Setup Golang
-        uses: actions/setup-go@v1
+        uses: actions/setup-go@v5
         with:
-          go-version: "1.15.7"
-      - name: Add bins to PATH
-        run: |
-          echo /home/runner/go/bin >> $GITHUB_PATH
-          echo /usr/local/bin >> $GITHUB_PATH
+          go-version: "1.21"
+      - name: Restore go build cache
+        uses: actions/cache@v4
+        with:
+          path: |
+            ~/.cache/go-build
+            ~/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-
       - name: Install protoc
         run: |
           set -eux -o pipefail
-          PROTOC_ZIP=protoc-3.11.1-linux-x86_64.zip
-          curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v3.11.1/$PROTOC_ZIP
+          PROTOC_VERSION=3.19.4
+          PROTOC_ZIP=protoc-$PROTOC_VERSION-linux-x86_64.zip
+          curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v$PROTOC_VERSION/$PROTOC_ZIP
           sudo unzip -o $PROTOC_ZIP -d /usr/local bin/protoc
           sudo unzip -o $PROTOC_ZIP -d /usr/local 'include/*'
           sudo chmod +x /usr/local/bin/protoc
@@ -42,17 +49,16 @@ jobs:
       - name: Install pandoc
         run: |
           set -eux -o pipefail
-          PANDOC_ZIP=pandoc-2.11.1-linux-amd64.tar.gz
-          curl -OL https://github.com/jgm/pandoc/releases/download/2.11.1/$PANDOC_ZIP
+          PANDOC_VERSION=2.17.1
+          PANDOC_ZIP=pandoc-$PANDOC_VERSION-linux-amd64.tar.gz
+          curl -OL https://github.com/jgm/pandoc/releases/download/$PANDOC_VERSION/$PANDOC_ZIP
           sudo tar xvzf $PANDOC_ZIP --strip-components 1 -C /usr/local
           rm -f $PANDOC_ZIP
-          echo /usr/local/pandoc-2.11.1/bin >> $GITHUB_PATH
+          echo /usr/local/pandoc-$PANDOC_VERSION/bin >> $GITHUB_PATH
       - name: Get dependencies
         run: go mod download
       - name: Make codegen
-        run: |
-          echo 'GOPATH=/home/runner/go' >> $GITHUB_ENV
-          make -B codegen
+        run: make -B codegen
       - name: Ensure nothing changed
         run: git diff --exit-code
@@ -61,58 +67,89 @@ jobs:
     runs-on: ubuntu-latest
     timeout-minutes: 10
     steps:
-      - name: Set up Go 1.x
-        uses: actions/setup-go@v2
+      - name: Checkout code
+        uses: actions/checkout@v4
+      - name: Setup Golang
+        uses: actions/setup-go@v5
         with:
-          go-version: "1.15.7"
-        id: go
-      - name: Check out code into the Go module directory
-        uses: actions/checkout@v2
-      - name: Restore Go build cache
-        uses: actions/cache@v1
+          go-version: "1.21"
+      - name: Restore go build cache
+        uses: actions/cache@v4
         with:
-          path: ~/.cache/go-build
-          key: ${{ runner.os }}-go-build-v1-${{ github.run_id }}
+          path: |
+            ~/.cache/go-build
+            ~/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-
       - name: Get dependencies
         run: go mod download
-      - name: Test
+      - name: Run tests
         run: make test
-  e2e-tests:
-    name: E2E Tests
+  lint:
+    name: lint
     runs-on: ubuntu-latest
-    timeout-minutes: 20
+    timeout-minutes: 10
+    env:
+      GOPATH: /home/runner/go
     steps:
       - name: Checkout code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
+      - name: Setup Golang
+        uses: actions/setup-go@v5
+        with:
+          go-version: "1.21"
       - name: Restore go build cache
-        uses: actions/cache@v1
+        uses: actions/cache@v4
         with:
-          path: ~/.cache/go-build
-          key: ${{ runner.os }}-go-build-v2-${{ hashFiles('**/go.mod') }}
+          path: |
+            ~/.cache/go-build
+            ~/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-
+      - run: make lint
+      - run: git diff --exit-code
+
+  e2e-tests:
+    name: E2E Tests
+    runs-on: ubuntu-latest
+    timeout-minutes: 25
+    env:
+      KUBECONFIG: /home/runner/.kubeconfig
+    strategy:
+      fail-fast: false
+      max-parallel: 4
+      matrix:
+        include:
+          - driver: stan
+          - driver: jetstream
+          - driver: kafka
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
       - name: Setup Golang
-        uses: actions/setup-go@v1
+        uses: actions/setup-go@v5
         with:
-          go-version: "1.15.7"
-      - name: Add bins to PATH
-        run: |
-          echo /home/runner/go/bin >> $GITHUB_PATH
-          echo /usr/local/bin >> $GITHUB_PATH
+          go-version: "1.21"
+      - name: Restore go build cache
+        uses: actions/cache@v4
+        with:
+          path: |
+            ~/.cache/go-build
+            ~/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-
       - name: Install k3d
         run: curl -sfL https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash &
-      - name: Create a registry and a cluster
+      - name: Create k3d registry and cluster
        run: |
          k3d registry create e2e-registry --port 5111
-          k3d cluster create e2e --registry-use k3d-e2e-registry:5111
-          k3d kubeconfig get e2e > ~/.kube/argo-events-e2e-config
+          k3d cluster create e2e -i rancher/k3s:v1.21.7-k3s1 --registry-use k3d-e2e-registry:5111
          echo '127.0.0.1 k3d-e2e-registry' | sudo tee -a /etc/hosts
-      - name: Install Argo Events
-        env:
-          GOPATH: /home/runner/go
-        run: |
-          KUBECONFIG=~/.kube/argo-events-e2e-config IMAGE_NAMESPACE=k3d-e2e-registry:5111 VERSION=${{ github.sha }} DOCKER_PUSH=true make start
       - name: Run tests
-        env:
-          GOPATH: /home/runner/go
-        run: KUBECONFIG=~/.kube/argo-events-e2e-config make test-functional
-
+        run: |
+          IMAGE_NAMESPACE=k3d-e2e-registry:5111 VERSION=${{ github.sha }} DOCKER_PUSH=true make start
+          EventBusDriver=${{ matrix.driver }} make test-functional
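The e2e job can be reproduced locally with the same commands it runs; a sketch assuming k3d, docker, and kubectl are installed (the k3s image tag, registry port, and make targets are taken verbatim from the workflow above; VERSION=dev is illustrative):

# Stand up the same k3d registry/cluster the CI job uses.
k3d registry create e2e-registry --port 5111
k3d cluster create e2e -i rancher/k3s:v1.21.7-k3s1 --registry-use k3d-e2e-registry:5111
echo '127.0.0.1 k3d-e2e-registry' | sudo tee -a /etc/hosts

# Build/push images into the local registry, install Argo Events, then run one driver's suite.
IMAGE_NAMESPACE=k3d-e2e-registry:5111 VERSION=dev DOCKER_PUSH=true make start
EventBusDriver=jetstream make test-functional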
diff --git a/.github/workflows/default-branch-check.yaml b/.github/workflows/default-branch-check.yaml
new file mode 100644
index 0000000000..5e4ac0a54a
--- /dev/null
+++ b/.github/workflows/default-branch-check.yaml
@@ -0,0 +1,18 @@
+name: PR check
+
+on:
+  pull_request:
+    branches:
+      - "release-*"
+
+jobs:
+  test-default-branch:
+    name: base branch is a default branch
+    runs-on: ubuntu-latest
+    steps:
+      - name: fail if base branch is not default branch
+        if: ${{ github.event.pull_request.base.ref != github.event.repository.default_branch }}
+        uses: actions/github-script@v3
+        with:
+          script: |
+            core.setFailed("Base branch of the PR - ${{ github.event.pull_request.base.ref }} is not a default branch. Please reopen your PR to ${{ github.event.repository.default_branch }}")
diff --git a/.github/workflows/dependabot-reviewer.yml b/.github/workflows/dependabot-reviewer.yml
new file mode 100644
index 0000000000..5d325bcda6
--- /dev/null
+++ b/.github/workflows/dependabot-reviewer.yml
@@ -0,0 +1,30 @@
+# https://docs.github.com/en/code-security/dependabot/working-with-dependabot/automating-dependabot-with-github-actions
+name: Approve and enable auto-merge for dependabot
+on: pull_request
+
+permissions:
+  contents: read
+
+jobs:
+  review:
+    runs-on: ubuntu-latest
+    permissions:
+      pull-requests: write
+      contents: write
+    if: ${{ github.actor == 'dependabot[bot]' }}
+    steps:
+      - name: Dependabot metadata
+        id: metadata
+        uses: dependabot/fetch-metadata@v2.1.0
+        with:
+          github-token: "${{ secrets.GITHUB_TOKEN }}"
+      - name: Approve PR
+        run: gh pr review --approve "$PR_URL"
+        env:
+          PR_URL: ${{github.event.pull_request.html_url}}
+          GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
+      - name: Enable auto-merge for Dependabot PRs
+        run: gh pr merge --auto --squash "$PR_URL"
+        env:
+          PR_URL: ${{github.event.pull_request.html_url}}
+          GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
\ No newline at end of file
diff --git a/.github/workflows/gh-pages.yaml b/.github/workflows/gh-pages.yaml
index ad446450b7..95700d1f87 100644
--- a/.github/workflows/gh-pages.yaml
+++ b/.github/workflows/gh-pages.yaml
@@ -1,29 +1,35 @@
-name: Deploy
+name: gh-pages
 
 on:
   push:
     branches:
       - master
 
+permissions:
+  contents: read
+
 jobs:
   deploy:
+    permissions:
+      contents: write # for peaceiris/actions-gh-pages to push pages branch
+    if: github.repository == 'argoproj/argo-events'
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v4
       - name: Setup Python
-        uses: actions/setup-python@v1
+        uses: actions/setup-python@v5
         with:
-          python-version: 3.x
+          python-version: 3.9
       - name: Setup Golang
-        uses: actions/setup-go@v1
+        uses: actions/setup-go@v5
         with:
-          go-version: '1.15.7'
+          go-version: '1.21'
       - name: build
         run: |
-          pip install mkdocs==1.0.4 mkdocs_material==4.1.1
+          pip install mkdocs==1.3.0 mkdocs_material==8.2.9
           mkdocs build
       - name: deploy
-        uses: peaceiris/actions-gh-pages@v3
+        uses: peaceiris/actions-gh-pages@v4
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
           publish_dir: ./site
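The docs site can be previewed locally with the same pinned toolchain before the gh-pages job publishes it (pip package versions are the ones from the workflow above; mkdocs serve is the standard live-reload preview command):

pip install mkdocs==1.3.0 mkdocs_material==8.2.9
mkdocs build    # writes the static site to ./site, which the deploy step publishes
mkdocs serve    # live preview at http://127.0.0.1:8000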
diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml
deleted file mode 100644
index 15f3257226..0000000000
--- a/.github/workflows/golangci-lint.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-name: golangci-lint
-on:
-  push:
-    branches: [ master ]
-  pull_request:
-    branches: [ master ]
-jobs:
-  golangci:
-    name: lint
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@v2
-      - name: Add to PATH
-        run: |
-          echo /home/runner/go/bin >> $GITHUB_PATH
-      - name: golangci-lint
-        run: |
-          echo 'GOPATH=/home/runner/go' >> $GITHUB_ENV
-          make lint
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 1710959568..dc67954c1f 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -5,24 +5,28 @@ on:
     tags:
       - 'v*'
     branches:
-      - master
+      - "master"
+      - "release-*"
 
 defaults:
   run:
     shell: bash
 
+permissions:
+  contents: read
+
 jobs:
   build-binaries:
     runs-on: ubuntu-20.04
     name: Build binaries
     steps:
       - name: Checkout
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4
 
       - name: Setup Go
-        uses: actions/setup-go@v2
+        uses: actions/setup-go@v5
         with:
-          go-version: 1.15
+          go-version: "1.21"
 
       - name: Build binaries
         run: |
@@ -31,7 +35,7 @@ jobs:
       - name: Make checksums
         run: make checksums
       - name: store artifacts
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: binaries
           path: dist
@@ -43,25 +47,27 @@ jobs:
     strategy:
       matrix:
         target: [ argo-events ]
+    outputs:
+      VERSION: ${{ steps.version.outputs.VERSION }}
     steps:
-      - uses: actions/checkout@v2
-
+      - uses: actions/checkout@v4
+
       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v1
+        uses: docker/setup-buildx-action@v3
 
       - name: Download binaries
-        uses: actions/download-artifact@v2
+        uses: actions/download-artifact@v3
         with:
           name: binaries
           path: dist/
 
       - name: Registry Login
-        uses: docker/login-action@v1
+        uses: docker/login-action@v2
         with:
           registry: quay.io
           username: ${{ secrets.QUAYIO_USERNAME }}
           password: ${{ secrets.QUAYIO_PASSWORD }}
-
+
       - name: set Version
         id: version
         run: |
@@ -69,28 +75,114 @@ jobs:
           if [ $tag = "master" ]; then
             tag="latest"
           fi
-          echo "::set-output name=VERSION::$tag"
+          echo "VERSION=$tag" >> $GITHUB_OUTPUT
 
       - name: Container build and push with arm64/amd64
+        env:
+          IMAGE_NAMESPACE: quay.io/${{ secrets.QUAYIO_ORG }}
         run: |
-          IMAGE_NAMESPACE=${{ secrets.QUAYIO_ORG }} VERSION=${{ steps.version.outputs.VERSION }} DOCKER_PUSH=true make image-multi
-
-  release:
+          VERSION=${{ steps.version.outputs.VERSION }} DOCKER_PUSH=true make image-multi
+
+  bom:
     runs-on: ubuntu-latest
     needs: [ build-push-linux-multi ]
     steps:
+      # https://stackoverflow.com/questions/58033366/how-to-get-current-branch-within-github-actions
+      - id: version
+        run: |
+          if [ ${GITHUB_REF##*/} = master ]; then
+            echo "VERSION=latest" >> $GITHUB_ENV
+          else
+            echo "VERSION=${GITHUB_REF##*/}" >> $GITHUB_OUTPUT
+          fi
+      - uses: actions/setup-go@v5
+        with:
+          go-version: "1.21"
+      - uses: actions/checkout@v4
+      - run: go install sigs.k8s.io/bom/cmd/bom@v0.5.1
+      - run: go install github.com/spdx/spdx-sbom-generator/cmd/generator@v0.0.13
+      - run: mkdir -p dist
+      - run: generator -o dist -p .
+      # do not scan images, this is only supported for debian-based images. See: https://github.com/kubernetes-sigs/bom#usage
+      - env:
+          VERSION: ${{ steps.version.outputs.VERSION }}
+        run:
+          bom generate --scan-images=false --image quay.io/${{ secrets.QUAYIO_ORG }}/argo-events:$VERSION -o /tmp/argo-events.spdx
+      # pack the boms into one file to make it easy to download
+      - run: cd /tmp && tar -zcf sbom.tar.gz *.spdx
+      - uses: actions/upload-artifact@v3
+        with:
+          name: sbom.tar.gz
+          path: /tmp/sbom.tar.gz
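The SBOM artifacts the bom job uploads can be generated locally with the same two tools it installs; a sketch using the versions pinned above (the image tag is illustrative):

go install sigs.k8s.io/bom/cmd/bom@v0.5.1
go install github.com/spdx/spdx-sbom-generator/cmd/generator@v0.0.13
mkdir -p dist
generator -o dist -p .    # SPDX SBOM for the Go module tree
# Image SBOM; --scan-images=false because image scanning only works for debian-based images.
bom generate --scan-images=false --image quay.io/argoproj/argo-events:latest -o /tmp/argo-events.spdx
tar -zcf /tmp/sbom.tar.gz -C /tmp argo-events.spdx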
+
+  release:
+    runs-on: ubuntu-latest
+    needs: [ build-push-linux-multi, bom ]
+    permissions:
+      contents: write # for softprops/action-gh-release to create GitHub release
+      id-token: write # Needed to create an OIDC token for keyless signing
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+
+      - name: Set Version
+        run: |
+          if [ ${GITHUB_REF##*/} = master ]; then
+            echo "VERSION=latest" >> $GITHUB_ENV
+          else
+            echo "VERSION=${GITHUB_REF##*/}" >> $GITHUB_ENV
+          fi
+
       - name: Download binaries
-        uses: actions/download-artifact@v2
+        uses: actions/download-artifact@v3
         with:
           name: binaries
           path: dist/
+      - uses: actions/download-artifact@v3
+        with:
+          name: sbom.tar.gz
+          path: /tmp
+
+      - name: Registry Login
+        uses: docker/login-action@v2
+        with:
+          registry: quay.io
+          username: ${{ secrets.QUAYIO_USERNAME }}
+          password: ${{ secrets.QUAYIO_PASSWORD }}
+
+      - name: Install cosign
+        uses: sigstore/cosign-installer@59acb6260d9c0ba8f4a2f9d9b48431a222b68e20 # v3.5.0
+        with:
+          cosign-release: 'v2.2.3'
+      - name: Install crane to get digest of image
+        uses: imjasonh/setup-crane@e82f1b9a8007d399333baba4d75915558e9fb6a4 # v0.2
+
+      - name: Get digests of container images
+        id: get-digest
+        env:
+          VERSION: ${{ needs.build-push-linux-multi.outputs.VERSION }}
+        run: |
+          echo "digest=$(crane digest quay.io/${{ secrets.QUAYIO_ORG }}/argo-events:$VERSION)" >> $GITHUB_OUTPUT
+
+      - name: Sign Argo Events container image and assets
+        env:
+          IMAGE_DIGEST: ${{ steps.get-digest.outputs.digest }}
+        run: |
+          cosign sign -y quay.io/${{ secrets.QUAYIO_ORG }}/argo-events@$IMAGE_DIGEST
+          cosign sign-blob -y ./dist/argo-events-checksums.txt > ./dist/argo-events-checksums.sig
+          cosign sign-blob -y /tmp/sbom.tar.gz > /tmp/sbom.tar.gz.sig
+
       - name: Release binaries
-        uses: softprops/action-gh-release@v1
+        uses: softprops/action-gh-release@v2
         if: startsWith(github.ref, 'refs/tags/')
         with:
           files: |
             dist/*.gz
-            dist/*.gz.sha256
+            dist/argo-events-checksums.txt
+            dist/argo-events-checksums.sig
+            manifests/*.yaml
+            /tmp/sbom.tar.gz
+            /tmp/sbom.tar.gz.sig
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
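Because signing is keyless (OIDC via the id-token permission), consumers can verify a published image with cosign v2; a sketch where the identity regexp and issuer are assumptions about this workflow's signing certificate, and the tag is illustrative:

# Resolve the digest the workflow signed, then verify the keyless signature.
DIGEST=$(crane digest quay.io/argoproj/argo-events:latest)
cosign verify "quay.io/argoproj/argo-events@$DIGEST" \
  --certificate-oidc-issuer https://token.actions.githubusercontent.com \
  --certificate-identity-regexp 'https://github.com/argoproj/argo-events/.*'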
diff --git a/.github/workflows/snyk.yaml b/.github/workflows/snyk.yaml
index 675a22b2ba..f0ba229f8a 100644
--- a/.github/workflows/snyk.yaml
+++ b/.github/workflows/snyk.yaml
@@ -1,13 +1,22 @@
 name: Snyk
-on: push
+on:
+  schedule:
+    - cron: "30 2 * * *"
+
+permissions:
+  contents: read
+
 jobs:
-  security:
+  # we do not scan images here, they're scanned here: https://app.snyk.io/org/argoproj/projects
+
+  golang:
+    if: github.repository == 'argoproj/argo-events'
     runs-on: ubuntu-latest
+    env:
+      SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
     steps:
-      - uses: actions/checkout@master
+      - uses: actions/checkout@v4
       - name: Run Snyk to check for vulnerabilities
         uses: snyk/actions/golang@master
-        env:
-          SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }}
         with:
           args: --severity-threshold=high
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
index 43ea50c9a5..c894e3c3e3 100644
--- a/.github/workflows/stale.yml
+++ b/.github/workflows/stale.yml
@@ -4,11 +4,17 @@ on:
   schedule:
     - cron: '21 1 * * *'
 
+permissions:
+  contents: read
+
 jobs:
   stale:
+    permissions:
+      issues: write # for actions/stale to close stale issues
+      pull-requests: write # for actions/stale to close stale PRs
     runs-on: 'ubuntu-latest'
     steps:
-      - uses: 'actions/stale@v3'
+      - uses: 'actions/stale@v9'
         with:
           stale-issue-message: |-
             This issue has been automatically marked as stale because it has not had
@@ -24,4 +30,3 @@ jobs:
           days-before-stale: 60
           days-before-close: 7
-
diff --git a/.gitignore b/.gitignore
index a7aaed828c..f66faf82f4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,8 +1,10 @@
 .vscode/
 .idea/
 .DS_Store
+.env
 vendor/
 dist/
+venv/
 # delve debug binaries
 cmd/**/debug
 hack/**/debug
@@ -12,8 +14,9 @@ debug.test
 *.out
 site/
 /go-diagrams/
+argo-events
 # telepresence
 .env
 telepresence.log
 # codefresh
-update_argo_events_manifests.sh
+update_argo_events_manifests.sh
\ No newline at end of file
diff --git a/.golangci.yml b/.golangci.yml
index 2d3b24050a..b299d14b83 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -9,7 +9,7 @@ run:
 linters:
   enable:
     - deadcode
-    - depguard
+    # - depguard
     - dogsled
     - goconst
     - gocritic
@@ -19,15 +19,13 @@ linters:
     - gosimple
     - govet
     - ineffassign
-    - interfacer
     - misspell
     - nakedret
     - rowserrcheck
     - staticcheck
-    - structcheck
+    # - structcheck
     - typecheck
     - unconvert
     - unused
     - varcheck
     - whitespace
-
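With the standalone golangci-lint workflow deleted, linting now happens in ci.yaml's lint job via make lint, which picks up the .golangci.yml above. A sketch of running the same check locally (assuming golangci-lint is installed on PATH; the git diff guard mirrors the CI job and fails if linting modified any files):

golangci-lint run ./...   # direct invocation against the repo config
make lint                 # or the exact target the CI lint job runs
git diff --exit-code      # non-zero if lint left changes behind, as in CI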
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 432490a24a..259dbbac26 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,194 +1,2240 @@
 # Changelog
-## v1.0.0
-
-### Features & Enhancements
-
-+ Pulsar event source. #774
-+ Simplified Circuit and Switch with Conditions. #834
-+ Simplified TLS config for EventSources and Sensors. #830
-+ Kafka Consumer Group. #817
-+ Added `NotEqualTo` comparator to data filters. #822
-+ Rate limit for Kafka and initial offset support and refactor config. #829
-+ Support for re-using existing subscription ID for gcp pubsub. #778
-+ Simple health check endpoint for webhooks. #823
-+ Simple Authentication for webhook related event sources. #826
-+ Connect to git with InsecureIgnoreHostKey. #842
-+ Switched to use volumes for injected secrets and configMaps in triggers. #792
-+ Added user metadata to eventsources.#797
-+ Added liveness and readiness check for controllers. #837
-+ Exposed metadata for sensors and fix metadata for eventsources. #773
-+ Upgraded argo cli to v2.10.x. #844
-
-### Bugfixes
-
-+ Updated log path to be together with data path. #833
-+ Switched slack lib and stop using deprecated APIs. #777
-+ Added/fixed tolerations for CRDs. #787
-+ Azure eventsource fix. #789
-+ Added logic to ticker loop to ensure subscription always exists with an open connection. #861
-+ Corrected response code for slack event source. #811
-+ Fixed Azure EventsHub issue. #846
-+ Added labels to created k8s objects. #805
-+ Retry when connect to eventbus in sensor. #816
-+ Fixed Calendar timezone issue. #801
-
-## v0.17.0
-
-+ Introduced Eventbus #674
-+ Merged EventSource and Gateway CR #735
-+ Re-implementation of sensor-controller and the sensor #723
-+ Refined auto-connect for the eventbus #761
-+ Introduced metrics to eventbus #745
-+ Different deployment update strategy for different event sources #755
-+ Removed Argo Workflow package dependency #758
-+ Enhanced time filter in the sensor #737
-+ Kafka EventSource non-tls connection fix #704
-+ Correct pkg structure and proto file generation #701
-
-## v0.16.0
-
-+ Add affinity, tolerations in template and clusterIP in service #663
-+ Support for using a live K8s resource in standard K8s triggers #671
-+ Introduce EventBus CRD #674
-+ Enhanced filter for resource gateway #682
-+ Fix gateway client http requests #684
-+ Fix k8s trigger operations #695
-+ Support to register multiple events in a GitLab webhook #696
-
-## v0.15.0
-
-+ Made deployment/Service template optional in Gateway #598
-+ Made deployment/Service template optional in Sensor #599
-+ Support K8s Trigger Patch Operation #643
-+ Support headers for HTTP trigger #642
-+ Introduced Custom Triggers #620
-+ Enabled workflow identity for GCP PubSub gateway #593
-
-## v0.14.0
-
-+ Introducing Slack notification trigger. #576
-+ Introducing OpenWhisk trigger. #585
-+ TLS support for messaging queue gateways. #580
-+ Support for list of event types for resource gateway. #561
-+ Added label and field selectors for resource gateway. #571
-+ Basic Auth for HTTP trigger. #559
-+ Namespace defined in event sources for secret retrieval is optional. #569
-
-## v0.13.0
-+ Add AWS-SQS Cross Account Queue Monitoring #547
-+ Enhanced Data Filter Comparator #544
-+ Support Json Body as an option for gateways #542
-+ Fix duplicate webhooks issue for GitLab Gateway #537
-+ Add request headers to GitHub and GitLab event payload #536,#533
-+ Fix gateway nats subscribers logic bug #535
-+ Added Kafka message publisher as trigger. #530
-+ Added Nats message publisher as trigger #531
-+ Support for Argo Rollout as trigger #529
-+ Update Argo workflows dependency to 2.6.1 #528
-+ Cleanup OpenFaas trigger implementation #526
-+ Implemented Assume RoleARN for SQS and SNS #519
-+ Complete Gateways & Sensors installation guide #548
-
-## v0.13.0-rc
-+ Enriched structure for events generated by the gateways #493. Refer https://github.com/argoproj/argo-events/blob/master/pkg/apis/events/event-data.go
-+ Introducing the custom triggers (Beta) #493
-+ Added support for generic event source #467
-+ Introducing gateway setup guides #507
-+ Fix filters for the resource gateway #499
-+ Fix Kafka gateway issue #490
-
-## v0.12.0
-+ Added HTTP, AWS Lambda, OpenFaas triggers.
-+ Support for special operations like submit, resubmit, retry, etc. for Argo workflow triggers
-+ Support for Create and Update operations for K8s resource triggers.
-+ Added Redis, Emitter, Stripe, NSQ, and Azure Events Hub gateways.
-+ Simplified gateway subscriber logic.
-
-## v0.12.0-rc
-+ Support Event Source as K8s custom resource #377
-
-## v0.11
-+ Fix transform issue when doing go build #389
-+ Workflow parameters are ignored when triggered through argo-events #373
-+ Allow regular expression in event filters #360
-+ volumes doesn't work in Workflow triggered webhook-sensor #342
-
-## v0.10
-+ Added ability to refer the eventSource in a different namespace #311
-+ Ability to send events sensors in different namespace #317
-+ Support different trigger parameter operations #319
-+ Support fetching/checkouts of non-branch/-tag git refs #322
-+ Feature/support slack interaction actions #324
-+ Gcp Pubsub Gateway Quality of life Fixes #326
-+ Fix circuit bug #327
-+ Pub/Sub: multi-project support #330
-+ Apply resource parameters before defaulting namespace #331
-+ Allow watching of resources in all namespaces at once #334
-+ upport adding Github hooks without requiring a Github webhook id to be hardcoded #352
-
-## v0.9
-+ Support applying parameters for complete trigger spec #230
-+ Increased test coverage #220
-+ Gateway and Sensor pods are managed by the framework in the event of deletion #194
-+ Add URL to webhook like gateways #216
-+ Improved file gateway with path regex support #213
-+ TLS support for webhook #206
-+ Bug fix for Github gateway #243
-
-## v0.8
-+ Support for arbitrary boolean logic to resolve event dependencies #12
-+ Ability to add timezone and extra user payload to calendar gateway #164
-+ Data filter bug fix #165
-+ Added multi-fields/multi-values data filter #167
-+ Added support for backoff option when making connections in stream gateway #168
-+ AWS SNS Gateway #169
-+ GCP PubSub Gateway #176
-+ Support for git as trigger source #171
-+ Enhance Gitlab & Github gateway with in-build http server #172
-
-## v0.7
-+ Refactor gateways #147
-+ Fixed sensor error state recovery bug #145
-+ Ability to add annotations on sensor and gateway pods #143
-+ Github gateway
-+ Added support for NATS standard and streaming as communication protocol between gateway
-  and sensor #99
-
-## v0.6
-+ Gitlab Gateway #120
-+ If sensor is repeatable then deploy it as deployment else job #109
-+ Start gateway containers in correct order. Gateway transformer > gateway processor. Add readiness probe to gateway transformer #106
-+ Kubernetes configmaps as artifact locations for triggers #104
-+ Let user set extra environment variable for sensor pod #103
-+ Ability to provide complete Pod spec in gateway.
-+ The schedule for calendar gateway is now in standard cron format #102
-+ FileWatcher as core gateway #98
-+ Tutorials on setting up pipelines #105
-+ Asciinema recording for setup #107
-+ StorageGrid Gateway
-+ Scripts to generate swagger specs from gateway structs
-+ Added support to pass non JSON payload to trigger
-+ Update shopify's sarama to 1.19.0 to support Kafka 2.0
-
-
-## v0.5
-[#92](https://github.com/argoproj/argo-events/pull/92)
-+ Introduced gateways as event generators.
-+ Added multiple flavors of gateway - core gateways, gRPC gateways, HTTP gateways, custom gateways
-+ Added K8 events to capture gateway configurations update and as means to update gateway resource
-+ SLA violations are now reported through k8 events
-+ Sensors can trigger Argo workflow, any kubernetes resource and gateway
-+ Gateway can send events to other gateways and sensors
-+ Added examples for gateway and sensors
-+ Sensors are now repeatable and fixed all issues with signal repeatability.
-+ Removed signal deployments as microservices.
-
-## v0.5-beta1 (tbd)
-+ Signals as separate deployments [#49](https://github.com/argoproj/argo-events/pull/49)
-+ Fixed code-gen bug [#46](https://github.com/argoproj/argo-events/issues/46)
-+ Filters for signals [#26](https://github.com/argoproj/argo-events/issues/26)
-+ Inline, file and url sources for trigger workflows [#41](https://github.com/argoproj/argo-events/issues/41)
-
-## v0.5-alpha1
-+ Initial release
+## v1.8.1 (2023-08-31)
+
+ * [325d4385](https://github.com/argoproj/argo-events/commit/325d4385ae25ca66ddfa7c0ac8c0adb22d5dc5f4) Update manifests to v1.8.1
+ * [ee3303e3](https://github.com/argoproj/argo-events/commit/ee3303e38b483595ae92665b0a369eab420d129a) chore(deps): bump github.com/antonmedv/expr from 1.14.0 to 1.14.3 (#2774)
+ * [442a0178](https://github.com/argoproj/argo-events/commit/442a0178632a03745fb61e1fe7ffde8ec881ba91) chore(deps): bump github.com/antonmedv/expr from 1.13.0 to 1.14.0 (#2760)
+ * [f7216dfa](https://github.com/argoproj/argo-events/commit/f7216dfa9b59234da2f83647945dc63f5df627bb) chore(deps): bump github.com/ktrysmt/go-bitbucket from 0.9.58 to 0.9.66 (#2748)
+ * [00aced42](https://github.com/argoproj/argo-events/commit/00aced42740d9be6721c7f106778b8a6cc073a8e) chore(deps): bump actions/setup-go from 3.5.0 to 4.1.0 (#2749)
+ * [14eeaf62](https://github.com/argoproj/argo-events/commit/14eeaf624ce4cf872be917c797de3cda4a0024de) chore(deps): bump github.com/tidwall/gjson from 1.14.4 to 1.16.0 (#2745)
+ * [a2683682](https://github.com/argoproj/argo-events/commit/a268368260d7f85aa8c99727cce809442312d656) chore(deps): bump github.com/pkg/sftp from 1.13.5 to 1.13.6 (#2746)
+ * [61ae42ab](https://github.com/argoproj/argo-events/commit/61ae42ab7e36ba3751352a91e436b2a6c9dec75b) chore(deps): bump github.com/antonmedv/expr from 1.12.5 to 1.13.0 (#2747)
+ * [e620f03c](https://github.com/argoproj/argo-events/commit/e620f03cd848b8ded1d8731301dd24bb2dfb7cdd) chore(deps): bump google.golang.org/api from 0.124.0 to 0.136.0 (#2744)
+ * [cd081b19](https://github.com/argoproj/argo-events/commit/cd081b1908f38d2d80f030219187409477461a7b) fix: Doc: Argo Workflow apply command link 404 #2737 (#2739)
+ * [029b6453](https://github.com/argoproj/argo-events/commit/029b645321d7bb5bdb22a6b8bc32c7e3c1d95fab) chore(deps): bump golang.org/x/crypto from 0.9.0 to 0.12.0 (#2735)
+ * [3a76997a](https://github.com/argoproj/argo-events/commit/3a76997aa9ab3ef521b68417e7da440afed50846) chore(deps): bump github.com/xanzy/go-gitlab from 0.83.0 to 0.90.0 (#2734)
+ * [f6eba926](https://github.com/argoproj/argo-events/commit/f6eba926e91066f18a850ca7e8389d40dd2ac6c5) SFTP event source (#2693)
+ * [2fbbd725](https://github.com/argoproj/argo-events/commit/2fbbd7254ff6644bcce6b65d4aec19a8774be574) chore(deps): bump github.com/eclipse/paho.mqtt.golang from 1.4.1 to 1.4.3 (#2694)
+ * [d5d7ccf4](https://github.com/argoproj/argo-events/commit/d5d7ccf417fc52d224e36c8250b3a937af5c7903) chore(deps): bump google.golang.org/grpc from 1.55.0 to 1.56.2 (#2696)
+ * [34a70aaa](https://github.com/argoproj/argo-events/commit/34a70aaa1d85f204e72e792f8c6cd2e6fbed0a10) chore(deps): bump sigstore/cosign-installer from 3.0.5 to 3.1.1 (#2691)
+ * [8e3209f2](https://github.com/argoproj/argo-events/commit/8e3209f26b4886b2d51ddd8699f17a32d8ce0052) chore(deps): bump dependabot/fetch-metadata from 1.4.0 to 1.6.0 (#2690)
+ * [8500ed98](https://github.com/argoproj/argo-events/commit/8500ed98f8b264f457650824e2633221e932ee8d) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.57 to 7.0.58 (#2678)
+ * [54c9d401](https://github.com/argoproj/argo-events/commit/54c9d401f00c1a47336fb16f0c4ea8fe6d3b8634) feat(eventsource): add tls configuration for bitbucketserver (#2674)
+ * [be473375](https://github.com/argoproj/argo-events/commit/be4733752883d5ff35ba21d7b4a2baba1488f291) fix: use cluster domain neutral svc dns (#2655)
+ * [efcbd108](https://github.com/argoproj/argo-events/commit/efcbd108a4c53e5aadb4ddeb2b480f3c314bca21) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.52 to 7.0.57 (#2661)
+ * [5989233d](https://github.com/argoproj/argo-events/commit/5989233de0dcf4b4dab3bc707d9602d65d865054) chore(deps): bump github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus from 1.3.0 to 1.4.0 (#2652)
+ * [0eb721b5](https://github.com/argoproj/argo-events/commit/0eb721b5a645823e5dfa207431eb7365e680c314) chore(deps): bump github.com/spf13/viper from 1.15.0 to 1.16.0 (#2643)
+ * [44328eb7](https://github.com/argoproj/argo-events/commit/44328eb7a24cf957ce7ed2f18c2f18694a79fa34) chore(deps): bump github.com/go-git/go-git/v5 from 5.6.1 to 5.7.0 (#2634)
+ * [5e076efb](https://github.com/argoproj/argo-events/commit/5e076efbf25b4ee009a4cd75575cc9fe7ce4616d) chore(deps): bump github.com/Azure/azure-event-hubs-go/v3 from 3.5.0 to 3.6.0 (#2633)
+ * [2aa2632e](https://github.com/argoproj/argo-events/commit/2aa2632e01cb5e461d3df0ef5394e150b47e9625) chore(deps): bump cloud.google.com/go/pubsub from 1.30.1 to 1.31.0 (#2632)
+ * [adfa18dc](https://github.com/argoproj/argo-events/commit/adfa18dc37b4681af1c657c6f6d0a967922a42ea) chore(deps): bump actions/setup-python from 4.6.0 to 4.6.1 (#2631)
+ * [4847b820](https://github.com/argoproj/argo-events/commit/4847b820e3d6f286d61d0cb86d0d7132348093fa) feat: Allow to base64 decode messages from Azure Queue Storage (#2627)
+ * [553d1a17](https://github.com/argoproj/argo-events/commit/553d1a17406151370af4f4f4931d8dfef639137e) chore(deps): bump github.com/ktrysmt/go-bitbucket from 0.9.55 to 0.9.58 (#2624)
+ * [e077a22f](https://github.com/argoproj/argo-events/commit/e077a22f7da1fde14c4cb337da4fca6d21896168) chore(deps): bump google.golang.org/api from 0.120.0 to 0.123.0 (#2626)
+ * [2bf9f7b7](https://github.com/argoproj/argo-events/commit/2bf9f7b7f23ec0a89972aee1469c0a158e2aa0c3) chore(deps): bump sigstore/cosign-installer from 3.0.3 to 3.0.5 (#2623)
+ * [23056868](https://github.com/argoproj/argo-events/commit/2305686841d1ee709f84260f58aceae0af851b12) chore(deps): bump github.com/Azure/azure-sdk-for-go/sdk/azidentity from 1.2.2 to 1.3.0 (#2618)
+ * [94c7dcc7](https://github.com/argoproj/argo-events/commit/94c7dcc742e3a21132c98522deb7ba12a6522bea) chore(deps): bump github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus from 1.2.1 to 1.3.0 (#2615)
+ * [bdf593a2](https://github.com/argoproj/argo-events/commit/bdf593a25d444659db070ce70828583882d80708) chore(deps): bump golang.org/x/crypto from 0.8.0 to 0.9.0 (#2619)
+ * [e705449b](https://github.com/argoproj/argo-events/commit/e705449bef68960852f3e9a484e4f14a7ae2e9c1) chore(deps): bump github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue from 0.1.0 to 1.0.0 (#2617)
+ * [cc9fa2de](https://github.com/argoproj/argo-events/commit/cc9fa2de6d195ce24471ffeb811b554a29a3be21) Upgrade nats image to 2.9.16 (#2609)
+ * [407fe433](https://github.com/argoproj/argo-events/commit/407fe43314ffd498d40a694dc857e0f6e47f860c) fix(kafkaEventBus): Reset kafka consumer on cleanup (#2611)
+
+### Contributors
+
+ * David Farr
+ * Derek Wang
+ * Eduardo Rodrigues
+ * Maksym Verbovyi
+ * Robert Deusser
+ * Son Bui
+ * dependabot[bot]
+ * dillonstreator
+ * igor-enso
+
+## v1.8.0 (2023-05-07)
+
+ * [57376875](https://github.com/argoproj/argo-events/commit/57376875fb47ab3f5d0960591c29a50b7b372748) Update manifests to v1.8.0
+ * [af0bd86b](https://github.com/argoproj/argo-events/commit/af0bd86b4f2e86bcb82790ddebaa487395a48696) chore(deps): bump github.com/rabbitmq/amqp091-go from 1.8.0 to 1.8.1 (#2607)
+ * [a0a44426](https://github.com/argoproj/argo-events/commit/a0a444261176a03af85ea456c170903d1e55213c) chore(deps): bump cloud.google.com/go/pubsub from 1.30.0 to 1.30.1 (#2606)
+ * [93e72091](https://github.com/argoproj/argo-events/commit/93e72091d09b479d02b10fa3b5cef8ffde364f18) chore(deps): bump github.com/prometheus/client_golang from 1.15.0 to 1.15.1 (#2605)
+ * [3f9db1b2](https://github.com/argoproj/argo-events/commit/3f9db1b27c313a0135d733cd8ded2a3b5bbbb896) chore(deps): bump google.golang.org/grpc from 1.54.0 to 1.55.0 (#2604)
+ * [58d56dde](https://github.com/argoproj/argo-events/commit/58d56dde64306b1cf2d56204f32a22eaf3f11515) chore: adding tests for the removeConflicts function & the Convert2WaitBackoff function (#2600)
+ * [42fc8e4c](https://github.com/argoproj/argo-events/commit/42fc8e4c554e44ff51467fde8890d904dd0a5734) Fix: typo on azure queue storage event source (#2598)
+ * [419de082](https://github.com/argoproj/argo-events/commit/419de082018c9a8168dec592c986e70144ba61ea) chore(deps): bump github.com/bradleyfalzon/ghinstallation/v2 from 2.3.0 to 2.4.0 (#2597)
+ * [c1e0e8b7](https://github.com/argoproj/argo-events/commit/c1e0e8b7405763794c96434988da66ee2b320f79) chore(deps): bump github.com/Azure/azure-sdk-for-go/sdk/azidentity from 1.1.0 to 1.2.2 (#2596)
+ * [4b868e50](https://github.com/argoproj/argo-events/commit/4b868e5021d2ea584d315ad61183d1345483a72f) chore(deps): bump google.golang.org/api from 0.119.0 to 0.120.0 (#2595)
+ * [04b70df3](https://github.com/argoproj/argo-events/commit/04b70df3aad531e240b6de982aad34b3ac53437a) chore(deps): bump sigstore/cosign-installer from 3.0.2 to 3.0.3 (#2594)
+ * [0256f0e0](https://github.com/argoproj/argo-events/commit/0256f0e03e188e5ca7e7e7c6a1b91cd3f4c5fdc5) feat: update log level for some lines to Info and expand logged fields (#2592)
+ * [14d5975b](https://github.com/argoproj/argo-events/commit/14d5975bf75ea3ed96eb59e715ae4ec9f4f35cfd) feat: add support for Azure Managed Identity in Azure Service Bus event source (#2591)
+ * [5581431f](https://github.com/argoproj/argo-events/commit/5581431f19521f0f557b2b301001b593a9368f6c) fix: make start to also work with arm64 (#2590)
+ * [b51a5769](https://github.com/argoproj/argo-events/commit/b51a57692c9e359bf6570c2f57e4e04d298102b8) feat: add azure queue storage event source (#2589)
+ * [06f39767](https://github.com/argoproj/argo-events/commit/06f3976793750b206a5c4ff16a064b84b64a2b7d) chore(deps): bump github.com/xanzy/go-gitlab from 0.81.0 to 0.83.0 (#2586)
+ * [87fafb24](https://github.com/argoproj/argo-events/commit/87fafb2404d22f58e64d5049185bc42d4d713a7d) chore(deps): bump google.golang.org/api from 0.118.0 to 0.119.0 (#2585)
+ * [f2e3183d](https://github.com/argoproj/argo-events/commit/f2e3183d19836e9637b7449062741aad8eec91c1) chore(deps): bump actions/setup-python from 4.5.0 to 4.6.0 (#2583)
+ * [ea7ce60c](https://github.com/argoproj/argo-events/commit/ea7ce60ca6509e34b623412a599fe6ca947798a5) chore(deps): bump dependabot/fetch-metadata from 1.3.5 to 1.4.0 (#2584)
+ * [7120ac1f](https://github.com/argoproj/argo-events/commit/7120ac1f33af09d35c3deb977e770245040cfd64) chore(deps): bump google.golang.org/api from 0.116.0 to 0.118.0 (#2578)
+ * [b8a2ea89](https://github.com/argoproj/argo-events/commit/b8a2ea89b373ae2e226417168fd046e1273f0a10) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.50 to 7.0.52 (#2579)
+ * [a202a1a7](https://github.com/argoproj/argo-events/commit/a202a1a74e01521185b2e73044cacabb36499ff4) chore(deps): bump github.com/prometheus/client_golang from 1.14.0 to 1.15.0 (#2576)
+ * [33c82729](https://github.com/argoproj/argo-events/commit/33c8272916de04af20cc12cae255d5e79f95d2df) chore(deps): bump github.com/slack-go/slack from 0.12.1 to 0.12.2 (#2575)
+ * [2e154b63](https://github.com/argoproj/argo-events/commit/2e154b638fff421e1f1bf561da8c56a92c3b2cde) feat: add headers in kafka event source (#2555)
+ * [6ea2998f](https://github.com/argoproj/argo-events/commit/6ea2998ffa9a5496b21605a783697b4f5d542bc3) feat: support athenz authentication for pulsar (#2559)
+ * [35fafb7e](https://github.com/argoproj/argo-events/commit/35fafb7ea67ca8b42292b7b80a20f8d705685500) chore(deps): bump golang.org/x/crypto from 0.7.0 to 0.8.0 (#2569)
+ * [9572c961](https://github.com/argoproj/argo-events/commit/9572c9612f9b9c9b6c91cb3ead8ced57c991d8e2) chore(deps): bump github.com/bradleyfalzon/ghinstallation/v2 from 2.2.0 to 2.3.0 (#2570)
+ * [e48fdda9](https://github.com/argoproj/argo-events/commit/e48fdda97a881a743d7b58e75bad41fc6d34f470) chore(deps): bump google.golang.org/api from 0.114.0 to 0.116.0 (#2565)
+ * [6eefc397](https://github.com/argoproj/argo-events/commit/6eefc3970a464c061f7a32e7b1a968926f84518e) chore(deps): bump github.com/spf13/cobra from 1.6.1 to 1.7.0 (#2562)
+ * [40e0a204](https://github.com/argoproj/argo-events/commit/40e0a204f7ff0160d4f562b8c6f6707a7ad0d89e) chore(deps): bump peter-evans/create-pull-request from 4 to 5 (#2561)
+ * [253aa187](https://github.com/argoproj/argo-events/commit/253aa18762cd0181d384a83c6b76eb5f04eb8259) chore(deps): bump sigstore/cosign-installer from 3.0.1 to 3.0.2 (#2560)
+ * [9dab78bd](https://github.com/argoproj/argo-events/commit/9dab78bd29241c5ee40495fb34c37d7ff1bc7bf2) feat: Add golang metrics to argo-events metrics registry. (#2557)
+ * [f5ad05cb](https://github.com/argoproj/argo-events/commit/f5ad05cb4b79687f393aa2e28cca378a5285f15c) chore(deps): bump github.com/Azure/azure-event-hubs-go/v3 from 3.4.0 to 3.5.0 (#2553)
+ * [a6b7201d](https://github.com/argoproj/argo-events/commit/a6b7201d0d8cb3099e4eac8402f5374982343ef2) fix: gitlab group missing token (#2550)
+ * [711c672d](https://github.com/argoproj/argo-events/commit/711c672d772f55170c2482a5a884977a99bbce6d) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.47 to 7.0.50 (#2546)
+ * [132c65ac](https://github.com/argoproj/argo-events/commit/132c65ac07fa8e002b0334f46cd268123c354f04) chore(deps): bump cloud.google.com/go/pubsub from 1.29.0 to 1.30.0 (#2542)
+ * [af96ec24](https://github.com/argoproj/argo-events/commit/af96ec2431909d82b4782b6b88012426193535f8) chore(deps): bump github.com/antonmedv/expr from 1.12.3 to 1.12.5 (#2545)
+ * [1f41d5fa](https://github.com/argoproj/argo-events/commit/1f41d5fa6d875d8e0de41008d3e58230d8d59230) chore(deps): bump github.com/nats-io/nats.go from 1.24.0 to 1.25.0 (#2544)
+ * [55c5fefe](https://github.com/argoproj/argo-events/commit/55c5fefe1e526cb124d8cc94529b885d1f78b1c9) chore(deps): bump github.com/imdario/mergo from 0.3.14 to 0.3.15 (#2543)
+ * [8f64a074](https://github.com/argoproj/argo-events/commit/8f64a074fa3ba8797b82706c74f6460e723af331) chore(deps): bump github.com/rabbitmq/amqp091-go from 1.7.0 to 1.8.0 (#2541)
+ * [b85b3f0a](https://github.com/argoproj/argo-events/commit/b85b3f0a4bda29b2d952f2509a467629de209a26) chore(deps): bump google.golang.org/grpc from 1.53.0 to 1.54.0 (#2540)
+ * [55b8b71b](https://github.com/argoproj/argo-events/commit/55b8b71b4ee6ddc0673b380dd95d893b902af480) fix(docs): kafka eventbus docs links (#2538)
+ * [4fbe471a](https://github.com/argoproj/argo-events/commit/4fbe471a360e7c8c6739c83d82d931a6b56a87de) fix(ci): setup-go treat 1.20 as 1.2 (#2537)
+ * [95a0877f](https://github.com/argoproj/argo-events/commit/95a0877fa5cbae75471dfb5b4466109259389b91) feat: Kafka EventBus (#2502)
+ * [419edeb5](https://github.com/argoproj/argo-events/commit/419edeb58418712d1e094ee8ae5ca0dbb45d475e) Chore: go1.20 (#2534)
+ * [742f7684](https://github.com/argoproj/argo-events/commit/742f7684c34500ab82bc0bb0788ba65c740ce819) chore(deps): bump github.com/go-git/go-git/v5 from 5.4.2 to 5.6.1 (#2529)
+ * [ed350343](https://github.com/argoproj/argo-events/commit/ed350343831643f83a63e98c0504af9fa178f2c5) chore(deps): bump github.com/google/go-github/v50 from 50.1.0 to 50.2.0 (#2531)
+ * [675c857b](https://github.com/argoproj/argo-events/commit/675c857bd257a8a7339e89ed7c95f09f4294f108) chore(deps): bump cloud.google.com/go/pubsub from 1.28.0 to 1.29.0 (#2530)
+ * [d234e21f](https://github.com/argoproj/argo-events/commit/d234e21f2b418c4edbf5a87324564741cbd7cded) chore(deps): bump github.com/imdario/mergo from 0.3.13 to 0.3.14 (#2527)
+ * [faceda0e](https://github.com/argoproj/argo-events/commit/faceda0e346431cf8bb4afc13ad3c6fd670ec4cc) fix github sensor example to use generateName (#2522)
+ * [65e4ad96](https://github.com/argoproj/argo-events/commit/65e4ad96783d959aa3f818cba364d1ed42a133f3) feat(webhook): add additional env vars (#2356)
+ * [371edd28](https://github.com/argoproj/argo-events/commit/371edd2868283257153b5f0f2515f14a95e16e00) chore(deps): bump google.golang.org/protobuf from 1.29.0 to 1.29.1 (#2520)
+ * [fb99769d](https://github.com/argoproj/argo-events/commit/fb99769df3581901d0c27bd9d960fd457a6bbc9e) add labels to jetstream statefulset generated by eventbus controller (#2510)
+ * [1cd2fc15](https://github.com/argoproj/argo-events/commit/1cd2fc151a3817ab2462c52f1dc571fdd3707c4e) chore(deps): bump github.com/xanzy/go-gitlab from 0.80.0 to 0.81.0 (#2515)
+ * [68e18f4b](https://github.com/argoproj/argo-events/commit/68e18f4b9f57d56d0c598f6a54bbff460984dbf8) chore(deps): bump google.golang.org/api from 0.111.0 to 0.112.0 (#2511)
+ * [eb1cb913](https://github.com/argoproj/argo-events/commit/eb1cb9136a9d240b575b4eedd72581a3a52f0235) chore(deps): bump github.com/antonmedv/expr from 1.12.1 to 1.12.3 (#2516)
+ * [0d783723](https://github.com/argoproj/argo-events/commit/0d78372378675c7f7a7d60bb8e701c6c333f2634) chore(deps): bump github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus from 1.2.0 to 1.2.1 (#2514)
+ * [f3921412](https://github.com/argoproj/argo-events/commit/f3921412f3045e7d3d1f928a9e7957a0b5866e37) chore(deps): bump github.com/golang/protobuf from 1.5.2 to 1.5.3 (#2512)
+ * [8d02ba00](https://github.com/argoproj/argo-events/commit/8d02ba006cd530eea509f5cf4343df1f9e49c257) chore(deps): bump golang.org/x/crypto from 0.6.0 to 0.7.0 (#2513)
+ * [242324d7](https://github.com/argoproj/argo-events/commit/242324d7d2606794b87a5faca7a6b703ed4075f6) chore(deps): bump github.com/gavv/httpexpect/v2 from 2.13.0 to 2.14.0 (#2503)
+ * [66abc6b0](https://github.com/argoproj/argo-events/commit/66abc6b0e4d6518c610963538a8b36f55c5ee5d1) chore(deps): bump github.com/itchyny/gojq from 0.12.11 to 0.12.12 (#2501)
+ * [5f8c5a83](https://github.com/argoproj/argo-events/commit/5f8c5a838bf9cd44ac8374b7bb4f8fc38d10cb9e) chore(deps): bump google.golang.org/api from 0.109.0 to 0.111.0 (#2499)
+ * [0d727dce](https://github.com/argoproj/argo-events/commit/0d727dceade89b0d39333453a6cf9b34163827b8) chore(deps): bump sigstore/cosign-installer from 2.8.1 to 3.0.1 (#2498)
+ * [ee44afd0](https://github.com/argoproj/argo-events/commit/ee44afd01e8b77de2831fc666dd35c52bbc25b0b) chore(ci): replace deprecated command (#2496)
+ * [82a98db5](https://github.com/argoproj/argo-events/commit/82a98db5c0e058e65da822b4263dc7ded05db562) fix(GithubEventSource): GitHub Enterprise auth logic broken (#2494) (#2495)
+ * [f28cde2e](https://github.com/argoproj/argo-events/commit/f28cde2edd923e2846b4e4e19109d38a0c0d80d9) fix(controller): use a copied object to update because of mutation (#2466)
+ * [201b833d](https://github.com/argoproj/argo-events/commit/201b833d6b89459ad1d0685f2433c8bb515361c1) Add Salesforce in USERS.md (#2493)
+ * [90a2419f](https://github.com/argoproj/argo-events/commit/90a2419f0b7ee3dd7bda461ecb4aeeb77f24f90e) Add Pinnacle Reliability to user list (#2483)
+ * [fedac970](https://github.com/argoproj/argo-events/commit/fedac970bf5c22ca8d366ab2c9f9da75423c46bf) chore(deps): bump github.com/argoproj/notifications-engine from 0.3.1-0.20221203221941-490d98afd1d6 to 0.4.0 (#2491)
+ * [b5c92299](https://github.com/argoproj/argo-events/commit/b5c92299c7e2cd560585a82f489fa376ecfd1ea6) chore(deps): bump github.com/gavv/httpexpect/v2 from 2.12.0 to 2.13.0 (#2490)
+ * [1b40edd2](https://github.com/argoproj/argo-events/commit/1b40edd2fc475993f85e9878a92de0b811ee0734) chore(deps): bump github.com/stretchr/testify from 1.8.1 to 1.8.2 (#2489)
+ * [92385eb7](https://github.com/argoproj/argo-events/commit/92385eb7a4ad7fa867ebb822ad27dab16e19d0cf) chore(deps): bump github.com/nats-io/nats.go from 1.23.0 to 1.24.0 (#2488)
+ * [dbd78c55](https://github.com/argoproj/argo-events/commit/dbd78c55fe45a6838090eef5ba981f9d1cde6fdd) chore(deps): bump github.com/antonmedv/expr from 1.10.5 to 1.12.1 (#2486)
+ * [f6973a80](https://github.com/argoproj/argo-events/commit/f6973a800fe6c80b9cb2a4e46bd1539c510ee729) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.199 to 1.44.209 (#2485)
+ * [0564890e](https://github.com/argoproj/argo-events/commit/0564890ea09adb6dd30f6052cd54becb1bfc4c75) feat(eventsource): Gitlab eventsource - support groups (#2386) (#2474)
+ * [a5766017](https://github.com/argoproj/argo-events/commit/a57660170ea6a9641c7853b6c04676a7a487bdc7) chore(deps): bump google.golang.org/grpc from 1.52.3 to 1.53.0 (#2472)
+ * [75fd39cc](https://github.com/argoproj/argo-events/commit/75fd39cc04b9c201219936e0afcbc9e74fbd78d2) chore(deps): bump github.com/gavv/httpexpect/v2 from 2.9.0 to 2.12.0 (#2473)
+ * [e730e2e7](https://github.com/argoproj/argo-events/commit/e730e2e7bcd4aa56fe92f6b1c8da1018470d2883) chore(deps): bump github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus from 1.1.4 to 1.2.0 (#2471)
+ * [2615b0e3](https://github.com/argoproj/argo-events/commit/2615b0e32c3a6f8411a0cdf3d48d851705144920) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.184 to 1.44.199 (#2470)
+ * [aa9dd062](https://github.com/argoproj/argo-events/commit/aa9dd062549a0ab13d7dad01c51ce52461ca00d5) chore(deps): bump github.com/xanzy/go-gitlab from 0.79.1 to 0.80.0 (#2469)
+ * [16eebe83](https://github.com/argoproj/argo-events/commit/16eebe837b5491912221042c227a46947589d999) chore(deps): bump github.com/rabbitmq/amqp091-go from 1.6.1 to 1.7.0 (#2468)
+ * [bdbbc3ec](https://github.com/argoproj/argo-events/commit/bdbbc3ecad8dbcb590cfaa862955b5a8da7809e0) chore(deps): bump golang.org/x/crypto from 0.5.0 to 0.6.0 (#2467)
+ * [94bc4378](https://github.com/argoproj/argo-events/commit/94bc43783db74b2d1e1b9dce2f96a9f30ed138d5) feat: Optional kubernetes-based leader election (#2438)
+ * [c265190c](https://github.com/argoproj/argo-events/commit/c265190c9d2dc8fa8e436804cf306e37fb3af57a) fix: remove the secret watch privilege dependency from js eb ctrler (#2453)
+ * [54d4c340](https://github.com/argoproj/argo-events/commit/54d4c34031751ca2682bdabcf25c4c181b798611) chore(deps): bump github.com/xanzy/go-gitlab from 0.78.0 to 0.79.1 (#2460)
+ * [dc03451c](https://github.com/argoproj/argo-events/commit/dc03451c8641fd1b8564c569b61f42cc8a454094) chore(deps): bump github.com/rabbitmq/amqp091-go from 1.6.0 to 1.6.1 (#2459)
+ * [902bbf37](https://github.com/argoproj/argo-events/commit/902bbf37de4b8ec78952f4149d7833eb3f0348d3) chore(deps): bump google.golang.org/api from 0.108.0 to 0.109.0 (#2456)
+ * [236a9d52](https://github.com/argoproj/argo-events/commit/236a9d522df634cb150daa54c93e4d24724b8725) chore(deps): bump google.golang.org/grpc from 1.52.0 to 1.52.3 (#2448)
+ * [200b1acc](https://github.com/argoproj/argo-events/commit/200b1acc819859bdbe7a3498bb60d918ed96859d) chore(deps): bump github.com/antonmedv/expr from 1.10.1 to 1.10.5 (#2446)
+ * [45573056](https://github.com/argoproj/argo-events/commit/45573056a6b6f0e816e1e41d3c90b2eda3f04520) chore(deps): bump github.com/gavv/httpexpect/v2 from 2.8.0 to 2.9.0 (#2434)
+ * [90258b81](https://github.com/argoproj/argo-events/commit/90258b81513af57cc57c47890346ba55c185df7b) chore(deps): bump github.com/antonmedv/expr from 1.9.0 to 1.10.1 (#2433)
+ * [da6e48ee](https://github.com/argoproj/argo-events/commit/da6e48ee94bd0e36b0d3a678b42f6c02f7e28228) chore(deps): bump github.com/spf13/viper from 1.14.0 to 1.15.0 (#2432)
+ * [e24e8839](https://github.com/argoproj/argo-events/commit/e24e8839f7d7615950948946628d84a8ca038f7b) chore(deps): bump github.com/go-swagger/go-swagger from 0.29.0 to 0.30.4 (#2431)
+ * [0945e450](https://github.com/argoproj/argo-events/commit/0945e450e2bb7a1bf8a6db5a91ea736e6d0c0bb5) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.180 to 1.44.184 (#2429)
+ * [6e07599b](https://github.com/argoproj/argo-events/commit/6e07599b29871c1153d3ebac7c99972ac34491e7) chore(deps): bump github.com/nats-io/nats.go from 1.22.1 to 1.23.0 (#2428)
+ * [21872357](https://github.com/argoproj/argo-events/commit/2187235770d9ba7276a26bb269d859f21bdd906f) chore(deps): bump google.golang.org/api from 0.107.0 to 0.108.0 (#2430)
+ * [5a7c0c79](https://github.com/argoproj/argo-events/commit/5a7c0c7907d1b072e5daeae28e3c1fedb6418e90) chore(deps): bump github.com/rabbitmq/amqp091-go from 1.4.0 to 1.6.0 (#2427)
+ * [d8201b45](https://github.com/argoproj/argo-events/commit/d8201b45e5430d741461fd3233296ac1c25a135f) fix: kafka schema registry (#2423)
+ * [33c7a35c](https://github.com/argoproj/argo-events/commit/33c7a35c251f2b0ce69c0bbe7cc57f545c58d4db) Implement optional at least once semantics (#2404)
+ * [dab8a187](https://github.com/argoproj/argo-events/commit/dab8a187af6ed64b2fa7520cfaa4069561a2ceeb) feat: allow granular secret privileges. additional controller logging/leader-election options (#2411)
+ * [b01bf19e](https://github.com/argoproj/argo-events/commit/b01bf19e777a797219ccfce36863a097ca240915) chore(deps): bump github.com/Shopify/sarama from 1.37.0 to 1.38.0 (#2419)
+ * [5bd20f63](https://github.com/argoproj/argo-events/commit/5bd20f63ba6d1d4d99e924a183b3ccb093c41b58) chore(deps): bump google.golang.org/api from 0.106.0 to 0.107.0 (#2420)
+ * [f6541d17](https://github.com/argoproj/argo-events/commit/f6541d17d9c8e2515aca03912cc6d2a2def0cbfd) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.46 to 7.0.47 (#2421)
+ * [430b03bf](https://github.com/argoproj/argo-events/commit/430b03bf942a0147512ecb7d836786f4e8c36dd1) chore(deps): bump google.golang.org/grpc from 1.51.0 to 1.52.0 (#2418)
+ * [bf3c3185](https://github.com/argoproj/argo-events/commit/bf3c318542244e6ee73c56b04c0450a431606794) chore(deps): bump github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus from 1.1.3 to 1.1.4 (#2417)
+ * [5c9e89de](https://github.com/argoproj/argo-events/commit/5c9e89dea29d76cac5844d147a1da7926452e820) chore(deps): bump github.com/nats-io/stan.go from 0.10.3 to 0.10.4 (#2416)
+ * [34bb306b](https://github.com/argoproj/argo-events/commit/34bb306b864f8732fb860195cee4e04c90b75c70) chore(deps): bump github.com/xanzy/go-gitlab from 0.76.0 to 0.78.0 (#2414)
+ * [84ef54a3](https://github.com/argoproj/argo-events/commit/84ef54a3e02305a7ff513b72a54b06288d5cf344) chore(deps): bump github.com/cloudevents/sdk-go/v2 from 2.11.0 to 2.13.0 (#2415)
+ * [025109aa](https://github.com/argoproj/argo-events/commit/025109aa378e8240202d7931575254161155e4a1) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.175 to 1.44.180 (#2413)
+ * [2e1b9439](https://github.com/argoproj/argo-events/commit/2e1b94392a94c108567a138cb8eccebe5261eca4) chore(deps): bump actions/setup-python from 4.4.0 to 4.5.0 (#2412)
+ * [86926f3b](https://github.com/argoproj/argo-events/commit/86926f3b311db595e8a573f9e1ea2f7112cb8fff) fix: cloneDirectory validation on git artifcatory spec (#2407)
+ * [26747dfd](https://github.com/argoproj/argo-events/commit/26747dfdef67f78cc83641c6ce18dacfb4ee3473) feat(sensor): Kafka Trigger - support avro/schemaRegistry (#2385)
+ * [45f737d3](https://github.com/argoproj/argo-events/commit/45f737d3ee1a45f5469f213a62ba2763bc4b5668) chore(deps): bump golang.org/x/crypto from 0.4.0 to 0.5.0 (#2401)
+ * [469b3147](https://github.com/argoproj/argo-events/commit/469b31472f8cce1a9587ec2b024d65a904d597e6) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.171 to 1.44.175 (#2400)
+ * [d9419234](https://github.com/argoproj/argo-events/commit/d9419234d6e1858cf4110aba697b92b166f7df0e) chore(deps): bump google.golang.org/api from 0.104.0 to 0.106.0 (#2399)
+ * [62be2408](https://github.com/argoproj/argo-events/commit/62be2408dc7b63fca533cc28107ad490efff650a) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.45 to 7.0.46 (#2398)
+ * [d75aa6fc](https://github.com/argoproj/argo-events/commit/d75aa6fc4242b78c93c30737637d78a38f668c24) NATS event data - add header field (#2396)
+ * [9a0759d4](https://github.com/argoproj/argo-events/commit/9a0759d47f0dcf54dbc097b7e08c865846920470) fix: fix bug in evaluation of filters with filtersLogicalOperator=or (#2374)
+ * [1428cae9](https://github.com/argoproj/argo-events/commit/1428cae94225107eabf08b7297a50402c93e393c) Implement multiple partions usage in Kafka trigger (#2360)
+ * [3cbcd720](https://github.com/argoproj/argo-events/commit/3cbcd7200d2bf7588dd1b562ac7a6fe5c9e882e9) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.162 to 1.44.171 (#2387)
+ * [29da45b5](https://github.com/argoproj/argo-events/commit/29da45b595bc4b2a1b0210a188ad0aa1108a7e33) chore(deps): bump github.com/nats-io/nats.go from 1.21.0 to 1.22.1 (#2381)
+ * [6fa130ec](https://github.com/argoproj/argo-events/commit/6fa130ec31ee5c06a142f991549479b6796f9b49) chore(deps): bump github.com/slack-go/slack from 0.12.0 to 0.12.1 (#2379)
+ * [c4841e4b](https://github.com/argoproj/argo-events/commit/c4841e4b45d774be172b42c1e7e362c07a1a7073) chore(deps): bump github.com/itchyny/gojq from 0.12.10 to 0.12.11 (#2380)
+ * [56d82fa1](https://github.com/argoproj/argo-events/commit/56d82fa1494a85c78520559e93382b66b52f10ca) chore(deps): bump actions/setup-python from 4.3.0 to 4.4.0 (#2377)
+ * [d0d3fcd3](https://github.com/argoproj/argo-events/commit/d0d3fcd3b757462e185219d1b07241c05393b301) chore(deps): bump actions/stale from 6 to 7 (#2378)
+ * [b9e4bfe8](https://github.com/argoproj/argo-events/commit/b9e4bfe835fc79a759ba7abc9f47099990d94ec9) fix: Fixed Github Sensor example and minor doc correction (#2373)
+ * [683947f2](https://github.com/argoproj/argo-events/commit/683947f21de11dee1083c0386b678861e6d961b9) feat: Expand Slack Trigger Capabilities - blocks,attachments,threads and more (#2369)
+ * [cc2064e8](https://github.com/argoproj/argo-events/commit/cc2064e85f80f20685b66ceff85a06587a649456) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.157 to 1.44.162 (#2368)
+ * [31e28673](https://github.com/argoproj/argo-events/commit/31e2867321bfcdc3a227d567f058976159a1d52b) chore(deps): bump cloud.google.com/go/compute/metadata from 0.2.2 to 0.2.3 (#2366)
+ * [6ef93479](https://github.com/argoproj/argo-events/commit/6ef93479fef6f0a1bd20c096eef5493928719ba0) chore(deps): bump github.com/gavv/httpexpect/v2 from 2.7.0 to 2.8.0 (#2365)
+ * [5aaf819c](https://github.com/argoproj/argo-events/commit/5aaf819c8d682790591daef586bcfb7451b5f613) chore(deps): bump github.com/slack-go/slack from 0.11.2 to 0.12.0 (#2364)
+ * [2c355456](https://github.com/argoproj/argo-events/commit/2c355456da52a7c4ab2fa79b4c9d607fa124a1a8) chore(deps): bump actions/setup-go from 3.4.0 to 3.5.0 (#2361)
+ * [484f17c6](https://github.com/argoproj/argo-events/commit/484f17c658f6ac01a8be033f2ac5fed4a9b1a859) Make kafka eventsource compatible with samara 1.37.0 Addresses #2358 (#2359)
+ * [6c076960](https://github.com/argoproj/argo-events/commit/6c076960bba59a73d67e195d8f54eb96cf7e113e) fix: typo in gitlab example eventsource (#2353)
+ * [3cca40bf](https://github.com/argoproj/argo-events/commit/3cca40bf6203f7fef0f6f387fb42ce697b1dfaa9) chore(deps): bump github.com/nats-io/nats.go from 1.20.0 to 1.21.0 (#2351)
+ * [4609cdb7](https://github.com/argoproj/argo-events/commit/4609cdb75b8342fae336cd164234508a187460cb) chore(deps): bump google.golang.org/api from 0.103.0 to 0.104.0 (#2350)
+ * [9766f0ce](https://github.com/argoproj/argo-events/commit/9766f0ce3c73cff19dca15650eabf3e165f2f3d9) chore(deps): bump github.com/Azure/azure-event-hubs-go/v3 from 3.3.19 to 3.4.0 (#2347)
+ * [73885c48](https://github.com/argoproj/argo-events/commit/73885c488cce9324295882f630c3e7e4aed8ff6c) chore(deps): bump golang.org/x/crypto from 0.3.0 to 0.4.0 (#2348)
+ * [b90f5b53](https://github.com/argoproj/argo-events/commit/b90f5b53f5c73d9bc54d72aba8a455a5fb9e1dab) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.152 to 1.44.157 (#2349)
+ * [e9efbdc2](https://github.com/argoproj/argo-events/commit/e9efbdc26c97befeae3758c76ffcd32c33d64bd3) chore(deps): bump github.com/ktrysmt/go-bitbucket from 0.9.48 to 0.9.55 (#2346)
+ * [8566233c](https://github.com/argoproj/argo-events/commit/8566233cf18280414e1d354dd6c68278deae52a0) chore(deps): bump cloud.google.com/go/pubsub from 1.27.1 to 1.28.0 (#2345)
+ * [a7c5d1bc](https://github.com/argoproj/argo-events/commit/a7c5d1bc99d6a326f2acd1a70398a75e481d8483) chore(deps): bump github.com/xdg-go/scram from 1.1.1 to 1.1.2 (#2343)
+ * [558bb637](https://github.com/argoproj/argo-events/commit/558bb637e9f4473813aa7284f0693c094d1ded45) chore(deps): bump github.com/gavv/httpexpect/v2 from 2.6.1 to 2.7.0 (#2344)
+ * [99907ab1](https://github.com/argoproj/argo-events/commit/99907ab195f9da9acdda72b71d671133f6fcc871) chore(deps): bump go.uber.org/zap from 1.21.0 to 1.24.0 (#2335)
+ * [537eb323](https://github.com/argoproj/argo-events/commit/537eb32306cba92352e59c367e9d480cc94b1456) chore(deps): bump github.com/Masterminds/sprig/v3 from 3.2.0 to 3.2.3 (#2339)
+ * [371d1c0b](https://github.com/argoproj/argo-events/commit/371d1c0b7368d6852548b9363ae64f59de633828) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.44 to 7.0.45 (#2336)
+ * [ef56ec24](https://github.com/argoproj/argo-events/commit/ef56ec243d58de19352992a0b866662d4a2c0795) chore(deps): bump cloud.google.com/go/pubsub from 1.26.0 to 1.27.1 (#2337)
+ * [86065c3c](https://github.com/argoproj/argo-events/commit/86065c3ca9605ca5b6ab8651c591463571376775) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.145 to 1.44.152 (#2338)
+ * [0810165a](https://github.com/argoproj/argo-events/commit/0810165a390b6cb84772555b297103836d871f84) chore(deps): bump cloud.google.com/go/compute/metadata from 0.2.1 to 0.2.2 (#2334)
+ * [7b1f9497](https://github.com/argoproj/argo-events/commit/7b1f9497431f02dab917b43dd0dbfce9bd4008d9) chore(deps): bump github.com/itchyny/gojq from 0.12.9 to 0.12.10 (#2332)
+ * [e2eecdf9](https://github.com/argoproj/argo-events/commit/e2eecdf9a8ae6e28d73b988e36e5b790a42d9b81) chore(deps): bump actions/setup-go from 3.3.1 to 3.4.0 (#2331)
+ * [ab2c04fa](https://github.com/argoproj/argo-events/commit/ab2c04fa37d783322f8c32f41367db8fef73bfdc) feat: Azure Service Bus as a Trigger (#2280)
+ * [4464e575](https://github.com/argoproj/argo-events/commit/4464e575bbda8d338a0f54e3fcad0ebb912022fd) feat: Enable adding customized logging fields in sensor (#2325)
+ * [76b14c26](https://github.com/argoproj/argo-events/commit/76b14c26d7b7c45027bdc8a28c030653c7450bab) feat: Support non-string parameters. Closes #1236 (#2317)
+ * [69877793](https://github.com/argoproj/argo-events/commit/698777933a238d44827390ed0f95354a021a0d46) [issue-1863] username and password auth support for mqtt eventsource (#2324)
+ * [cb498812](https://github.com/argoproj/argo-events/commit/cb4988129c8a03780a373da6f84ff59db93c5846) chore(deps): bump github.com/tidwall/gjson from 1.14.3 to 1.14.4 (#2327)
+ * [87c02717](https://github.com/argoproj/argo-events/commit/87c0271741194593dc1f836afee804a16a6ea932) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.136 to 1.44.145 (#2330)
+ * [2084da88](https://github.com/argoproj/argo-events/commit/2084da88cbd53a83aa29da6eb705903fe9965336) chore(deps): bump github.com/gavv/httpexpect/v2 from 2.4.1 to 2.6.1 (#2326)
+ * [a6b4f922](https://github.com/argoproj/argo-events/commit/a6b4f922242fa293c4fbb167c939b41ec86cf4ce) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.42 to 7.0.44 (#2329)
+ * [4affe993](https://github.com/argoproj/argo-events/commit/4affe993b0913a431de8fd10e9193a372c449d23) chore(deps): bump github.com/xanzy/go-gitlab from 0.75.0 to 0.76.0 (#2328)
+ * [4e952075](https://github.com/argoproj/argo-events/commit/4e952075f30a0c9ed0d9b53c9a636d53789a5dbd) chore(deps): bump github.com/xanzy/go-gitlab from 0.70.0 to 0.75.0 (#2320)
+ * [9329717b](https://github.com/argoproj/argo-events/commit/9329717b7226b8b11377ac0e9b7872e5470e9433) chore(deps): bump github.com/gavv/httpexpect/v2 from 2.2.0 to 2.4.1 (#2322)
+ * [2836be6a](https://github.com/argoproj/argo-events/commit/2836be6aea254213e695113b190d1901720cad6c) chore(deps): bump github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus from 1.1.2 to 1.1.3 (#2319)
+ * [b4b57424](https://github.com/argoproj/argo-events/commit/b4b5742475c022e9dd93ad45c5890a59371b44ab) eventbus controller: move fuzzer from cncf-fuzzing (#2314)
+ * [e4c24b06](https://github.com/argoproj/argo-events/commit/e4c24b06123d6316e6228aa84b19a4e2cdc4236a) chore(deps): bump google.golang.org/api from 0.100.0 to 0.103.0 (#2310)
+ * [480d09f8](https://github.com/argoproj/argo-events/commit/480d09f86904bb4f2ba95183633291c239ff7850) chore(deps): bump github.com/nats-io/nats.go from 1.19.1 to 1.20.0 (#2311)
+ * [27970d88](https://github.com/argoproj/argo-events/commit/27970d8862957a08c64997d217b5d7624831c844) chore(deps): bump github.com/spf13/viper from 1.12.0 to 1.14.0 (#2307)
+ * [99f70dd2](https://github.com/argoproj/argo-events/commit/99f70dd24135b6f200fc5dc19b6a22af4f78db89) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.126 to 1.44.136 (#2309)
+ * [bad1a70d](https://github.com/argoproj/argo-events/commit/bad1a70d1a957e20629cdec0fa54560991ae8360) chore(deps): bump github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus from 1.1.1 to 1.1.2 (#2306)
+ * [b659594c](https://github.com/argoproj/argo-events/commit/b659594cf774578ddb1a22a2e67cc9ca40395cd1) chore(deps): bump github.com/prometheus/client_golang from 1.13.1 to 1.14.0 (#2308)
+ * [876baf6f](https://github.com/argoproj/argo-events/commit/876baf6f485afa0b3bb14fbc8bf4f8cb914fef83) fix: payload serialization in sensor.
Fixes #2272 (#2273) + * [864dc8d5](https://github.com/argoproj/argo-events/commit/864dc8d5b9481b22604fb49bfbb3b34fb10e9805) fix: if key/value store already exists use that (#2293) + * [ce46986a](https://github.com/argoproj/argo-events/commit/ce46986a64e5d684b2a0c849d1ead772ac3fab1b) chore(deps): bump github.com/nats-io/nats.go from 1.19.0 to 1.19.1 (#2300) + * [f4dcbf89](https://github.com/argoproj/argo-events/commit/f4dcbf894ebe660723a6e4cf2815211a95e981a0) chore(deps): bump cloud.google.com/go/compute/metadata from 0.1.0 to 0.2.1 (#2303) + * [627830e7](https://github.com/argoproj/argo-events/commit/627830e76310eab8d238de03467ac0952ebfb595) chore(deps): bump dependabot/fetch-metadata from 1.3.4 to 1.3.5 (#2294) + * [ad5bf349](https://github.com/argoproj/argo-events/commit/ad5bf3492a6ed6b045e443bb9fc14c74a97e37cb) chore(deps): bump github.com/prometheus/client_golang from 1.13.0 to 1.13.1 (#2295) + * [776f573b](https://github.com/argoproj/argo-events/commit/776f573b54864164313599d0f6ff562e35dd3064) fix(docs): context filter documentation (#2277) + * [039de396](https://github.com/argoproj/argo-events/commit/039de396b2fe0dcf99b088e777a856755d706576) chore(deps): bump cloud.google.com/go/compute from 1.10.0 to 1.12.1 (#2284) + * [a49a5ebf](https://github.com/argoproj/argo-events/commit/a49a5ebf8929e5c39991f3dbaf946a9e662cc9f4) chore(deps): bump github.com/spf13/cobra from 1.6.0 to 1.6.1 (#2283) + * [ceef6f99](https://github.com/argoproj/argo-events/commit/ceef6f99d15af035d677fde569fbf6aefffde57f) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.121 to 1.44.126 (#2289) + * [afc2461f](https://github.com/argoproj/argo-events/commit/afc2461fdd544c46ef1fb0ce653b03024bce623e) chore(deps): bump cloud.google.com/go/pubsub from 1.24.0 to 1.26.0 (#2288) + * [77de738a](https://github.com/argoproj/argo-events/commit/77de738a3f67282015201ce70c473607d5ba9125) chore(deps): bump github.com/nats-io/nats.go from 1.18.0 to 1.19.0 (#2285) + * [0418899c](https://github.com/argoproj/argo-events/commit/0418899c69195df55776d797b5ad25650a1d026c) chore(deps): bump github.com/stretchr/testify from 1.8.0 to 1.8.1 (#2286) + * [d5ac0389](https://github.com/argoproj/argo-events/commit/d5ac038929502b0f9e060d868ac55f37f8c989f2) chore(deps): bump actions/setup-go from 3.2.0 to 3.3.1 (#2271) + * [80738fce](https://github.com/argoproj/argo-events/commit/80738fcea0a9fbba8e53f9301e517b6a6c2433f8) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.41 to 7.0.42 (#2269) + * [50f7e3cc](https://github.com/argoproj/argo-events/commit/50f7e3cca52e71e1370509fab22cd0e4cc327f42) chore(deps): bump github.com/Azure/azure-event-hubs-go/v3 from 3.3.18 to 3.3.19 (#2270) + * [b60db255](https://github.com/argoproj/argo-events/commit/b60db2554c1496cd32a53406a936fc9a23b80471) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.116 to 1.44.121 (#2267) + * [9c8f72b4](https://github.com/argoproj/argo-events/commit/9c8f72b4cfbfeff1c0ded3b5de1f09a9c59d2532) feat: update third_party dependencies (#2245) + * [d5cdae98](https://github.com/argoproj/argo-events/commit/d5cdae98805f384e88a95c1a6767fe9a6a1cfa58) chore(deps): bump google.golang.org/api from 0.98.0 to 0.99.0 (#2259) + * [2ce534e5](https://github.com/argoproj/argo-events/commit/2ce534e52f39f89596eea885afad10a03bd023b8) chore(deps): bump actions/setup-python from 4.2.0 to 4.3.0 (#2260) + * [49da9161](https://github.com/argoproj/argo-events/commit/49da916116b574b106c8aac51e631f9f2ed149ee) chore(deps): bump github.com/fsnotify/fsnotify from 1.5.4 to 1.6.0 (#2258) + * 
[3247a548](https://github.com/argoproj/argo-events/commit/3247a5484cd6be3dc6fe6495de3c485445483a85) chore(deps): bump github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus from 1.1.0 to 1.1.1 (#2256) + * [179b85de](https://github.com/argoproj/argo-events/commit/179b85de9219a3bb63171a3a1e04841601efb5e8) chore(deps): bump github.com/spf13/cobra from 1.5.0 to 1.6.0 (#2255) + * [8df07db5](https://github.com/argoproj/argo-events/commit/8df07db51cbb760f74e30020854f4be2b9396887) chore(deps): bump google.golang.org/grpc from 1.50.0 to 1.50.1 (#2254) + * [b26cfc05](https://github.com/argoproj/argo-events/commit/b26cfc050d9c24daf455295784f7d50cae924cda) chore(deps): bump github.com/nats-io/nats.go from 1.17.0 to 1.18.0 (#2253) + * [c5ca7a87](https://github.com/argoproj/argo-events/commit/c5ca7a8770d074a32438d3aba579e988a638a2b8) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.114 to 1.44.116 (#2251) + * [a238386b](https://github.com/argoproj/argo-events/commit/a238386b06a7837f711271f000aa4c1d0a797673) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.40 to 7.0.41 (#2252) + * [8423ab73](https://github.com/argoproj/argo-events/commit/8423ab73906e88cfc3ebd386bf9ceaca1ca4c49b) fix(docs): partition as optional field for kafka eventsource fixes: #1502 (#2246) + * [00e5fd0d](https://github.com/argoproj/argo-events/commit/00e5fd0d60fa38bfdf72e938ec3311b1375b8bf5) feat: Revision History Limit for sensor. Closes #1786 (#2244) + * [32dd1251](https://github.com/argoproj/argo-events/commit/32dd125145908f670374fe4f524d29207d206ca0) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.39 to 7.0.40 (#2241) + * [3391ebe3](https://github.com/argoproj/argo-events/commit/3391ebe3cb01e7cd5eb955b3f8d657fe14703ffa) chore(deps): bump google.golang.org/grpc from 1.49.0 to 1.50.0 (#2240) + * [f753d1bf](https://github.com/argoproj/argo-events/commit/f753d1bfadad45460d1f3aa462997d0ece9eb33c) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.109 to 1.44.114 (#2239) + * [001415b4](https://github.com/argoproj/argo-events/commit/001415b4d7eafe797da299bb9163a74471f978b0) feat: Azure Service Bus as EventSource (#2229) + * [80a4b8ee](https://github.com/argoproj/argo-events/commit/80a4b8eea8abf2acf1af2e4cd0d0a840bcd60694) Property name typo in Expr filter documentation (#2231) + * [075d7be3](https://github.com/argoproj/argo-events/commit/075d7be35054ad7f2f3e672dae25da967131a8ab) chore(deps): bump dependabot/fetch-metadata from 1.3.3 to 1.3.4 (#2227) + * [aa987f23](https://github.com/argoproj/argo-events/commit/aa987f23a71312e7958f523c5ebb9449409cfe2d) chore(deps): bump google.golang.org/api from 0.97.0 to 0.98.0 (#2226) + * [d5a8ce94](https://github.com/argoproj/argo-events/commit/d5a8ce944174531c687994745fef7beb07002f53) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.105 to 1.44.109 (#2224) + * [406ce8f6](https://github.com/argoproj/argo-events/commit/406ce8f68c1c41c2fa71f7607e667fc3248fdc69) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.35 to 7.0.39 (#2225) + * [71c12968](https://github.com/argoproj/argo-events/commit/71c12968c967ce39cb41f358d1fc672121c69a86) chore(deps): bump github.com/Shopify/sarama from 1.35.0 to 1.37.0 (#2223) + * [81b501e8](https://github.com/argoproj/argo-events/commit/81b501e80e8939385b18e4867b60cd57213d2f7f) Make port configurable in webhook, default is set to 443 (#2215) + * [07e69600](https://github.com/argoproj/argo-events/commit/07e69600caa200c0cfbc5916631eb9cf6bc0d349) feat: Kafka es discontinues processing if eb publishing fails (#2214) + * 
[49176afb](https://github.com/argoproj/argo-events/commit/49176afb11b1c50c281323899823c06931e0760c) chore(deps): bump actions/stale from 5 to 6 (#2213) + * [cbca423d](https://github.com/argoproj/argo-events/commit/cbca423d700767309f69bbc412e5a41713eabec8) chore(deps): bump google.golang.org/api from 0.93.0 to 0.97.0 (#2211) + * [a3c2e2c8](https://github.com/argoproj/argo-events/commit/a3c2e2c8e9a07b06c8ada57316881a2d5efd6288) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.81 to 1.44.105 (#2208) + * [b2276e7c](https://github.com/argoproj/argo-events/commit/b2276e7ccc2a327e3bfdc32939180efd3826f351) fix filter for github sensor example (#2188) + * [3ffab551](https://github.com/argoproj/argo-events/commit/3ffab5518929facd5c51c312bfcb8013e8c0019b) fix: Emissary executor reads and writes to WorkflowTaskResults, not Pods (#2189) + * [d9edead2](https://github.com/argoproj/argo-events/commit/d9edead21a9bb2a5ec47b82287b14ff54708fb08) Adding Elastic as an Argo Events Users (#2200) + * [39d195cc](https://github.com/argoproj/argo-events/commit/39d195cc75c1d49d85cf5b556dd80e18d6704e9c) chore(deps): bump github.com/nats-io/nats.go from 1.16.0 to 1.17.0 (#2197) + * [b9a3063b](https://github.com/argoproj/argo-events/commit/b9a3063b5ffa2294c5c8c2efe4611cdd0cf85ad0) feat: Webhook event source to support filtering (#2178) + * [c6fa0f44](https://github.com/argoproj/argo-events/commit/c6fa0f446b9dfa1d89ecb37db96b6c1c18999a91) fix: dependency should use % (#2175) + * [c90985da](https://github.com/argoproj/argo-events/commit/c90985da4fee6ffb4b006e3a656e9224d58d6cdf) fix: error is swallowed after retry failure (#2160) + * [beb57b6e](https://github.com/argoproj/argo-events/commit/beb57b6eac367e5bef0341d2cb27553bfe35f0e2) feat: Kafka eventsource supports Sarama config customization (#2161) + * [bb864842](https://github.com/argoproj/argo-events/commit/bb8648427e4d475dd866f8d5ae0af47a6817df8e) feat: retry failed eventbus message publishing (#2162) + * [d15693f7](https://github.com/argoproj/argo-events/commit/d15693f7e6c3abf3cbc8227578a926ae2418440d) feat: Add option to configure NATS max_payload in JetStream eventbus (#2164) + * [85142d0d](https://github.com/argoproj/argo-events/commit/85142d0d4cd870957479844f6265f929849d8133) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.34 to 7.0.35 (#2167) + * [7d5face8](https://github.com/argoproj/argo-events/commit/7d5face8f4f118ab57d99a41e3ca75d036742538) chore(deps): bump github.com/itchyny/gojq from 0.12.8 to 0.12.9 (#2165) + * [b4b45919](https://github.com/argoproj/argo-events/commit/b4b45919c3d87af7d56d56897b7e72c439d153a5) fix: Increase random sleep range in Bitbucket eventsource (#2148) + * [56196143](https://github.com/argoproj/argo-events/commit/56196143ecbf8b451d5ecb02a0ca13835e20954c) chore(deps): bump google.golang.org/grpc from 1.48.0 to 1.49.0 (#2149) + * [8c32250a](https://github.com/argoproj/argo-events/commit/8c32250ad95ec00a8758ed404c9310ce3d881087) solved deleteHookOnFinish race condition + removed hooks daemon mitigation (#2145) + * [6c10f456](https://github.com/argoproj/argo-events/commit/6c10f4565e0a72fb65bb9b92538f45a67017873c) chore(deps): bump github.com/tidwall/gjson from 1.14.2 to 1.14.3 (#2143) + * [887982ef](https://github.com/argoproj/argo-events/commit/887982ef2158bc5495398d889cfa4fe11d005920) chore(deps): bump google.golang.org/api from 0.91.0 to 0.93.0 (#2144) + * [5f883bdb](https://github.com/argoproj/argo-events/commit/5f883bdb5bb381490e48eadab13c0a341cfd8efb) feat: AWS Temporary credential support for SQS eventsource (#2092) + 
* [5daf962a](https://github.com/argoproj/argo-events/commit/5daf962a22470ccf70eaea56537c5e3f7d8035b5) chore(deps): bump github.com/cloudevents/sdk-go/v2 from 2.10.1 to 2.11.0 (#2133) + * [2023fc45](https://github.com/argoproj/argo-events/commit/2023fc458bbe48df78fe64e75d030d9022492c61) chore(deps): bump cloud.google.com/go/compute from 1.7.0 to 1.9.0 (#2142) + * [3aae488c](https://github.com/argoproj/argo-events/commit/3aae488c378a9a9356d8714145ba04fab1ed6bc6) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.70 to 1.44.81 (#2141) + * [f37ee4c6](https://github.com/argoproj/argo-events/commit/f37ee4c6cea105dee92c0218abf229b9f5550ded) fix: added controller config validation (#2103) + * [4206495b](https://github.com/argoproj/argo-events/commit/4206495b09e5edb6d4b0acef2d7393de1fe8a2ef) feat: submit from workflowtemplate (#2120) + * [5161c3de](https://github.com/argoproj/argo-events/commit/5161c3de6f8d48ae5fa534be1e9e0673957200cb) chore(deps): bump google.golang.org/api from 0.90.0 to 0.91.0 (#2122) + * [c0c890a9](https://github.com/argoproj/argo-events/commit/c0c890a96e29fead46d45307106f78b879985507) chore(deps): bump actions/setup-python from 4.1.0 to 4.2.0 (#2129) + * [70643548](https://github.com/argoproj/argo-events/commit/70643548a10ed26eece7e70c4fd6e82095269d39) chore(deps): bump github.com/tidwall/gjson from 1.14.1 to 1.14.2 (#2123) + * [28969527](https://github.com/argoproj/argo-events/commit/289695279ec3f494bd12d29b835ce9dbbb5645b4) chore(deps): bump github.com/prometheus/client_golang (#2124) + * [e1640203](https://github.com/argoproj/argo-events/commit/e16402030ec7d530de12bf11ba96d8641c0c419d) chore(deps): bump github.com/xanzy/go-gitlab from 0.69.0 to 0.70.0 (#2128) + * [1d88b042](https://github.com/argoproj/argo-events/commit/1d88b042ff612ddcda3036f6f579931d76d505c8) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.61 to 1.44.70 (#2125) + * [1805365e](https://github.com/argoproj/argo-events/commit/1805365e55c25033f1e56fad0605c611220a09d3) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.32 to 7.0.34 (#2126) + * [7935d426](https://github.com/argoproj/argo-events/commit/7935d42671169b1e74ef5e3d5fdc894ffe757d45) chore(deps): bump github.com/nats-io/stan.go from 0.10.2 to 0.10.3 (#2119) + * [34a7fffe](https://github.com/argoproj/argo-events/commit/34a7fffe65ce1bb72cb904023130e4b598e8eb42) chore(deps): bump google.golang.org/api from 0.88.0 to 0.90.0 (#2117) + * [068bc244](https://github.com/argoproj/argo-events/commit/068bc2447024c9161a138a22de6bb2bce3ab7385) chore(deps): bump github.com/slack-go/slack from 0.11.1 to 0.11.2 (#2118) + * [54ab013d](https://github.com/argoproj/argo-events/commit/54ab013ded5aaf387116a346c7718fd46d2f8e4c) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.31 to 7.0.32 (#2115) + * [0f8ec126](https://github.com/argoproj/argo-events/commit/0f8ec126c365eae7e16347963aa35396f6ffbbc7) fix: CVE-2022-1996 (#2111) + * [3c0d7f9f](https://github.com/argoproj/argo-events/commit/3c0d7f9fc9a999cc2a56c1884c39cc6950fbcca2) fix: return error in case param value resolving fails (#2112) + * [0bbc1911](https://github.com/argoproj/argo-events/commit/0bbc191140e3f87f2291ec481eb6ee8e2059a06a) fix: add SCRAM functionality in Kafka triggers, SCRAM-SHA512/256 SASL (#2087) (#2091) + * [647af913](https://github.com/argoproj/argo-events/commit/647af913cb6426c21549716b13ffe5794a66f066) fix: update bitbucket es example yaml (#2113) + * [93f6f07c](https://github.com/argoproj/argo-events/commit/93f6f07c30586d090b09a50810d94a8f42ef5e24) chore(deps): bump 
github.com/Shopify/sarama from 1.34.1 to 1.35.0 (#2100) + * [2f8828a7](https://github.com/argoproj/argo-events/commit/2f8828a78a6f10b60e99da59996801e2e8c34ca1) chore(deps): bump github.com/rabbitmq/amqp091-go from 1.3.4 to 1.4.0 (#2099) + * [1eba7588](https://github.com/argoproj/argo-events/commit/1eba7588da606e27dee79e1f44537552b8a0071e) fix: Removed usage of projectKey field in bitbucket eventsource (#2109) + * [dc086761](https://github.com/argoproj/argo-events/commit/dc08676130e1f1de886a60732030e46f9f122c3b) chore(deps): bump github.com/slack-go/slack from 0.11.0 to 0.11.1 (#2097) + * [f419e67c](https://github.com/argoproj/argo-events/commit/f419e67c7e749d1b450115e5ac3ebdbb6ad4cea9) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.56 to 1.44.61 (#2098) + * [e301b691](https://github.com/argoproj/argo-events/commit/e301b691788c121acea6332ee8449cf4a05e07e5) fix: flaky e2e test cases (#2102) + * [b39d0c12](https://github.com/argoproj/argo-events/commit/b39d0c12a75d1bf600379134ee6cbc2919748e3f) chore(deps): bump google.golang.org/api from 0.87.0 to 0.88.0 (#2096) + * [6f277a07](https://github.com/argoproj/argo-events/commit/6f277a0754269ca837c5729da21a678ee7d792c3) chore(deps): bump cloud.google.com/go/pubsub from 1.23.1 to 1.24.0 (#2095) + * [a577bf7f](https://github.com/argoproj/argo-events/commit/a577bf7faf9e7a8ac3a6612fbce25a493cda4189) feat: Make max request payload size configurable in all git/webhook related eventsources (#2093) + * [6a16867e](https://github.com/argoproj/argo-events/commit/6a16867e055153072f0feb2c76cbeb4e729817b5) chore(deps): bump google.golang.org/grpc from 1.47.0 to 1.48.0 (#2079) + * [5ba897ac](https://github.com/argoproj/argo-events/commit/5ba897ac3b20413fb81245a91d4e6680984181a3) chore(deps): bump github.com/xanzy/go-gitlab from 0.68.2 to 0.69.0 (#2082) + * [86b76787](https://github.com/argoproj/argo-events/commit/86b7678720b5b84b6b9d8497ffbecf69f47d53c0) chore(deps): bump google.golang.org/api from 0.86.0 to 0.87.0 (#2080) + * [16a08273](https://github.com/argoproj/argo-events/commit/16a08273e161d76491cb3edce9428ea48422aa59) chore(deps): bump github.com/bradleyfalzon/ghinstallation/v2 (#2081) + * [c4a54b09](https://github.com/argoproj/argo-events/commit/c4a54b0971dc5eea424731f46dedaa72efa92bdc) chore(deps): bump actions/setup-python from 4.0.0 to 4.1.0 (#2084) + * [ddd34c16](https://github.com/argoproj/argo-events/commit/ddd34c161cc3397eadb689caff025d30c9e5a885) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.51 to 1.44.56 (#2083) + * [35184be4](https://github.com/argoproj/argo-events/commit/35184be4a128798b51840dc251897bf99068bab4) Update quick_start.md (#2072) + * [58c42b7f](https://github.com/argoproj/argo-events/commit/58c42b7f42272a271960f4c6c802281248d12cd6) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.30 to 7.0.31 (#2071) + * [5359f0da](https://github.com/argoproj/argo-events/commit/5359f0da650f8e2321f338e947f2931d50aea061) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.47 to 1.44.51 (#2070) + * [ceffdabc](https://github.com/argoproj/argo-events/commit/ceffdabcf3fb372b96ca454da635176b0157e9a1) feat: Add support for custom SNS endpoint (#2067) + * [c5171e69](https://github.com/argoproj/argo-events/commit/c5171e69a57dd059bcf5091080db197b9714bfed) chore(deps): bump cloud.google.com/go/pubsub from 1.23.0 to 1.23.1 (#2061) + * [00a4270a](https://github.com/argoproj/argo-events/commit/00a4270afa597766f9e35da179d1896a6924298b) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.29 to 7.0.30 (#2065) + * 
[0a3dbc2f](https://github.com/argoproj/argo-events/commit/0a3dbc2fe4415c8aeda2a33a4fdd504f890f6f1f) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.42 to 1.44.47 (#2064) + * [6ebadffc](https://github.com/argoproj/argo-events/commit/6ebadffce30dfebef68da3d24d147c889ee163ee) chore(deps): bump dependabot/fetch-metadata from 1.3.1 to 1.3.3 (#2066) + * [5014367b](https://github.com/argoproj/argo-events/commit/5014367b949a7ac55f629feb738612ea71a299dc) chore(deps): bump github.com/xanzy/go-gitlab from 0.68.0 to 0.68.2 (#2063) + * [91a57faf](https://github.com/argoproj/argo-events/commit/91a57fafcdd01339687033abb726c15b13f52d17) chore(deps): bump github.com/stretchr/testify from 1.7.5 to 1.8.0 (#2062) + * [11276110](https://github.com/argoproj/argo-events/commit/11276110b72d04a5d8fb913ef00f9e6583deb858) chore(deps): bump google.golang.org/api from 0.85.0 to 0.86.0 (#2060) + * [6cb8d405](https://github.com/argoproj/argo-events/commit/6cb8d405a7b646a9f808bfe159399481cc2208aa) chore(deps): bump cloud.google.com/go/pubsub from 1.22.2 to 1.23.0 (#2048) + * [df811619](https://github.com/argoproj/argo-events/commit/df81161950c26243eb643e13aca111556b444d88) chore(deps): bump github.com/argoproj/pkg from 0.13.3 to 0.13.6 (#2052) + * [89e9b872](https://github.com/argoproj/argo-events/commit/89e9b872ac9aabca0d9bcdb4a1fb9fa9515caf3e) chore(deps): bump github.com/stretchr/testify from 1.7.2 to 1.7.5 (#2053) + * [7cd9c486](https://github.com/argoproj/argo-events/commit/7cd9c4862bf01e1decb7f535ac1fa80b1f88f24b) chore(deps): bump google.golang.org/api from 0.84.0 to 0.85.0 (#2051) + * [19b14851](https://github.com/argoproj/argo-events/commit/19b148513b36b437f2d6b6334d5bda20a8464bc3) chore(deps): bump github.com/spf13/cobra from 1.4.0 to 1.5.0 (#2050) + * [5f2b8cbc](https://github.com/argoproj/argo-events/commit/5f2b8cbc6c5f670691e8882c2594dcdd79f41066) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.37 to 1.44.42 (#2049) + * [22d59c8f](https://github.com/argoproj/argo-events/commit/22d59c8f21c943c3083800afb03581c0889f576c) chore(deps): bump cloud.google.com/go/compute from 1.6.1 to 1.7.0 (#2039) + * [93b92478](https://github.com/argoproj/argo-events/commit/93b92478fe674813fa3c309a0e1219c7d837b5e8) chore(deps): bump github.com/ktrysmt/go-bitbucket from 0.9.47 to 0.9.48 (#2041) + * [ebef2a1d](https://github.com/argoproj/argo-events/commit/ebef2a1d5cf9e0c0a643012018dd4b6b326441be) chore(deps): bump github.com/argoproj/pkg from 0.13.1 to 0.13.3 (#2037) + * [13817b08](https://github.com/argoproj/argo-events/commit/13817b0881136273949553cd7cb27202ede47470) chore(deps): bump github.com/slack-go/slack from 0.10.3 to 0.11.0 (#2040) + * [1694586e](https://github.com/argoproj/argo-events/commit/1694586e1723093ec08b48b09ffae9fb22c85bf1) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.27 to 7.0.29 (#2038) + * [2fc93663](https://github.com/argoproj/argo-events/commit/2fc93663f5cae430a268ca4018eb5a51d452dd31) chore(deps): bump google.golang.org/api from 0.83.0 to 0.84.0 (#2035) + * [d227f098](https://github.com/argoproj/argo-events/commit/d227f098891eb19e92241eaaaee034dedd96d859) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.32 to 1.44.37 (#2036) + * [4d5852e7](https://github.com/argoproj/argo-events/commit/4d5852e70ce4ec05ddbf33aec991dd402733d812) feat: Added multiple repos support for bitbucket eventsource (#2031) + * [ef4ce09f](https://github.com/argoproj/argo-events/commit/ef4ce09fb4e0faf6e700e1c90630742a1650cf7a) chore(deps): bump github.com/cloudevents/sdk-go/v2 from 2.10.0 to 2.10.1 (#2027) + 
* [18133711](https://github.com/argoproj/argo-events/commit/18133711845b68ffec08cce3ca782415e7b1b884) chore(deps): bump google.golang.org/api from 0.82.0 to 0.83.0 (#2029) + * [fef84cd1](https://github.com/argoproj/argo-events/commit/fef84cd12365e086d84bc1fa6ebfe762fcb13936) chore(deps): bump actions/setup-python from 3.1.2 to 4.0.0 (#2030) + * [c4b99368](https://github.com/argoproj/argo-events/commit/c4b9936815d8eb893d2c932dc26023a014525fca) chore(deps): bump github.com/Shopify/sarama from 1.34.0 to 1.34.1 (#2028) + * [9ee348c7](https://github.com/argoproj/argo-events/commit/9ee348c702c80b9a5b189c0b96e463a8749f650e) chore(deps): bump github.com/eclipse/paho.mqtt.golang (#2023) + * [3c1a96cd](https://github.com/argoproj/argo-events/commit/3c1a96cdac8ade456ee447501924158554116243) chore(deps): bump github.com/ktrysmt/go-bitbucket from 0.9.46 to 0.9.47 (#2025) + * [0a656dc1](https://github.com/argoproj/argo-events/commit/0a656dc194cb771c9831807db772a7867992cfbc) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.27 to 1.44.32 (#2024) + * [0b02d71b](https://github.com/argoproj/argo-events/commit/0b02d71be11381f9dbdee519ee93a303b85b114f) chore(deps): bump google.golang.org/api from 0.81.0 to 0.82.0 (#2017) + * [7a7e48fe](https://github.com/argoproj/argo-events/commit/7a7e48fef42ccf14d783ad1d49f2e1560b920d23) chore(deps): bump github.com/Shopify/sarama from 1.33.0 to 1.34.0 (#2013) + * [c255156b](https://github.com/argoproj/argo-events/commit/c255156bf598def923136a0397e4beaf93915630) chore(deps): bump cloud.google.com/go/pubsub from 1.21.1 to 1.22.2 (#2016) + * [8aea9560](https://github.com/argoproj/argo-events/commit/8aea9560110cfbd68aaf54bb24f10af953d35594) chore(deps): bump google.golang.org/grpc from 1.46.2 to 1.47.0 (#2014) + * [a6e1f934](https://github.com/argoproj/argo-events/commit/a6e1f934ac2676a63fc9b39586b7b359063b499d) chore(deps): bump github.com/itchyny/gojq from 0.12.7 to 0.12.8 (#2015) + * [8ace2240](https://github.com/argoproj/argo-events/commit/8ace2240d293247d0284e50c5f7d33f8773580d8) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.24 to 1.44.27 (#2012) + * [0cf63142](https://github.com/argoproj/argo-events/commit/0cf6314218da9cbfdf0f3ecade3116ffeb928f6b) chore(deps): bump github.com/imdario/mergo from 0.3.12 to 0.3.13 (#2001) + * [e085cae8](https://github.com/argoproj/argo-events/commit/e085cae8ee10102e1520e5b6eefcdb18686f8557) Add support for Redis ACL auth (#2007) + * [3b69cce1](https://github.com/argoproj/argo-events/commit/3b69cce1b7a683f71c07c00f675281c64d383358) chore(deps): bump actions/setup-go from 3.1.0 to 3.2.0 (#2006) + * [2eb04d28](https://github.com/argoproj/argo-events/commit/2eb04d28251bcf6574cbb89142bf609268f40b7e) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.26 to 7.0.27 (#2005) + * [e5ffa4d7](https://github.com/argoproj/argo-events/commit/e5ffa4d7d6b385cc5f798310a432e2bf6098543e) chore(deps): bump github.com/spf13/viper from 1.11.0 to 1.12.0 (#2004) + * [5601e38c](https://github.com/argoproj/argo-events/commit/5601e38cf9faae16e238da4c2e04917e79bd3c41) chore(deps): bump github.com/nats-io/nats.go from 1.15.0 to 1.16.0 (#2003) + * [964ba884](https://github.com/argoproj/argo-events/commit/964ba884ec60a8c123310316cabbe304b9461e98) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.21 to 1.44.24 (#2002) + * [5aad5292](https://github.com/argoproj/argo-events/commit/5aad5292582aa0c41af50ff5f078e4a817a869b9) chore(deps): bump github.com/xanzy/go-gitlab from 0.65.0 to 0.68.0 (#1995) + * 
[f4f5101f](https://github.com/argoproj/argo-events/commit/f4f5101fe177f4ecf765acd4fb106f97581e40d9) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.19 to 1.44.21 (#1993) + * [7f54ff06](https://github.com/argoproj/argo-events/commit/7f54ff06a086fb2df62b8cd5ba4052ba3d78b174) chore(deps): bump github.com/ktrysmt/go-bitbucket from 0.9.45 to 0.9.46 (#1994) + * [4fafbe8b](https://github.com/argoproj/argo-events/commit/4fafbe8bb0b84baffb153391af387870413e1313) chore(deps): bump google.golang.org/api from 0.80.0 to 0.81.0 (#1992) + * [9ea66fb9](https://github.com/argoproj/argo-events/commit/9ea66fb9f8d29063d9cd08a68cafb1e46dcf392d) fix: jetstream statefulset resource setting (#1989) + * [1d2f90ce](https://github.com/argoproj/argo-events/commit/1d2f90ce04cde95517c087ee634863550e7f22cf) feat: add support to parse QueryParameter and PostForm on webhook eve… (#1978) + * [6330171f](https://github.com/argoproj/argo-events/commit/6330171f2859984b0b12e4f40f445b1254e4b07f) chore(deps): bump google.golang.org/api from 0.79.0 to 0.80.0 (#1985) + * [31495bdb](https://github.com/argoproj/argo-events/commit/31495bdb27736c636f1e01bb5488b60896ac7d13) chore(deps): bump github.com/cloudevents/sdk-go/v2 from 2.9.0 to 2.10.0 (#1984) + * [64c6dbcd](https://github.com/argoproj/argo-events/commit/64c6dbcdac5c5edc32bfd8bf3b930237728071e2) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.14 to 1.44.19 (#1983) + * [fd18e818](https://github.com/argoproj/argo-events/commit/fd18e81854956bbb0c662a9dd4d230c026e41b36) fix: make access token optional (#1976) + * [40c4bffa](https://github.com/argoproj/argo-events/commit/40c4bffaae912b45e54df87502b6bde22ce25a0d) fix: Limit github hooks manager daemon lifetime to 10 min (#1930) + * [adf93fa2](https://github.com/argoproj/argo-events/commit/adf93fa277c499fe2e121526df56d0c0921b0046) chore(deps): bump google.golang.org/grpc from 1.46.0 to 1.46.2 (#1974) + * [c8a20141](https://github.com/argoproj/argo-events/commit/c8a20141c2fc973da7de80ee607e1cc3cf040cdc) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.9 to 1.44.14 (#1973) + * [87ee3875](https://github.com/argoproj/argo-events/commit/87ee3875d1934c9bfe7b65a043571f2d79c835e6) chore(deps): bump actions/setup-go from 3.0.0 to 3.1.0 (#1975) + * [48ad9890](https://github.com/argoproj/argo-events/commit/48ad9890f4f2b9052c055258a69544314bef8fe2) chore(deps): bump github.com/prometheus/client_golang (#1971) + * [6c31a1cb](https://github.com/argoproj/argo-events/commit/6c31a1cb2280f45f3546d5850e71b75a24b841cc) chore(deps): bump github.com/ktrysmt/go-bitbucket from 0.9.44 to 0.9.45 (#1969) + * [93d9c3fb](https://github.com/argoproj/argo-events/commit/93d9c3fb92e48d92fdc352a32351c0ad6be6469c) chore(deps): bump google.golang.org/api from 0.78.0 to 0.79.0 (#1970) + * [5ce87dbe](https://github.com/argoproj/argo-events/commit/5ce87dbe0438d952d01901625b49f115f1692925) chore(deps): bump github.com/xanzy/go-gitlab from 0.64.0 to 0.65.0 (#1968) + * [ac56855a](https://github.com/argoproj/argo-events/commit/ac56855a431c1a95eec81d6174a1bfff85fd4bdf) chore(deps): bump github.com/Shopify/sarama from 1.32.0 to 1.33.0 (#1967) + * [c70a1d11](https://github.com/argoproj/argo-events/commit/c70a1d11cd2818f6164bbcfef510d9ba10ef7cdc) fix: git artifactory arbitrary file read issue (#1965) + * [2b1244f4](https://github.com/argoproj/argo-events/commit/2b1244f432b19cac070658f0fe17bf4460931eb5) feat: add support of submit from existing resource (#1908) (#1941) + * 
[97c8d3de](https://github.com/argoproj/argo-events/commit/97c8d3de1060a73025e661e611fb21a28beea2d3) fix: use crypto/rand instead of math/rand (#1959) + * [e1c0736e](https://github.com/argoproj/argo-events/commit/e1c0736e1a28ace1c53a0380e21d29718fa2d8c1) fix: add nil check on sensor spec validation (#1961) + * [41936c63](https://github.com/argoproj/argo-events/commit/41936c63b52be2964174d759b681d3abd3eb1855) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.24 to 7.0.26 (#1939) + * [401566a1](https://github.com/argoproj/argo-events/commit/401566a13f3820f8e0232f7036af7051a928fdeb) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.8 to 1.44.9 (#1938) + * [106927cb](https://github.com/argoproj/argo-events/commit/106927cb2f68fb10e3934eec99b1bfe430ba4273) feat: Expand e2e tests (#1859) + * [33c2a53e](https://github.com/argoproj/argo-events/commit/33c2a53ebe68200c94bb156802e1e357fdbe4bd1) chore(deps): bump docker/setup-buildx-action from 1 to 2 (#1935) + * [504c9fc9](https://github.com/argoproj/argo-events/commit/504c9fc921bf24ffc9dfc57315e1a1d2ee0db36c) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.7 to 1.44.8 (#1937) + * [3b59a451](https://github.com/argoproj/argo-events/commit/3b59a451bd052ff63210a030224aa00683c4a30d) chore(deps): bump docker/login-action from 1 to 2 (#1936) + * [564d5763](https://github.com/argoproj/argo-events/commit/564d5763bc5a0148cb3ed737b7cadde46f721668) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.6 to 1.44.7 (#1933) + * [1a70a852](https://github.com/argoproj/argo-events/commit/1a70a85247915cc2c4da9ba92c32e0d6f1601d88) chore(deps): bump github.com/nats-io/nats.go from 1.14.0 to 1.15.0 (#1932) + * [41c1aebc](https://github.com/argoproj/argo-events/commit/41c1aebc83ae3f0fbf508469eb2bf1c4e00e1b25) chore(deps): bump cloud.google.com/go/pubsub from 1.21.0 to 1.21.1 (#1931) + * [2835521e](https://github.com/argoproj/argo-events/commit/2835521e4771943f1853799fb4856bc35b6a233c) chore(deps): bump google.golang.org/api from 0.77.0 to 0.78.0 (#1924) + * [702250d9](https://github.com/argoproj/argo-events/commit/702250d9afdc9c3527db71add6ea9c144e69f4f3) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.5 to 1.44.6 (#1923) + * [98f55200](https://github.com/argoproj/argo-events/commit/98f5520085569265ac7840853629ba058a0d370a) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.4 to 1.44.5 (#1922) + * [d6e6aed6](https://github.com/argoproj/argo-events/commit/d6e6aed65b1f538f3148ec01ec95820924d26e29) fix: Enforce webhook secret in BitbucketServer event source (#1917) + * [bff4a690](https://github.com/argoproj/argo-events/commit/bff4a690584faa0f4b928a3a346993b121f03f1f) chore(deps): bump google.golang.org/api from 0.76.0 to 0.77.0 (#1919) + * [6711754d](https://github.com/argoproj/argo-events/commit/6711754d4174aa49c73d3505c201211263cd241c) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.2 to 1.44.4 (#1918) + * [2d9fc5df](https://github.com/argoproj/argo-events/commit/2d9fc5df2b6305f30c771b9e40b8a99c916b85c0) added script filter (#1894) + * [111c3379](https://github.com/argoproj/argo-events/commit/111c3379eaeefba9d15c8b7eebc5ebef7b530649) fix: Bitbucketserver webhook secret optional (#1902) + * [fabd138f](https://github.com/argoproj/argo-events/commit/fabd138f96a0a63c3401bd988e9afc3984195719) chore(deps): bump github.com/Azure/azure-event-hubs-go/v3 (#1912) + * [46f2b573](https://github.com/argoproj/argo-events/commit/46f2b5736c59c27f0476d65b91aae6ffe88e4c57) chore(deps): bump github.com/argoproj/pkg from 0.13.0 to 0.13.1 (#1911) + * 
[4c00c92e](https://github.com/argoproj/argo-events/commit/4c00c92e67800173e6345927d4534d6afd59774e) chore(deps): bump google.golang.org/api from 0.75.0 to 0.76.0 (#1910) + * [f5de9320](https://github.com/argoproj/argo-events/commit/f5de932048e69c84bf034b237455cddbdfbb3216) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.1 to 1.44.2 (#1909) + * [e8e373e2](https://github.com/argoproj/argo-events/commit/e8e373e27bcee91d421c0bdf1b5a4353077678f2) chore(deps): bump github.com/fsnotify/fsnotify from 1.5.3 to 1.5.4 (#1907) + * [73f5c730](https://github.com/argoproj/argo-events/commit/73f5c730398d7e2e37f1d526181f4ddbc310f7f0) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.0 to 1.44.1 (#1906) + * [c55297e3](https://github.com/argoproj/argo-events/commit/c55297e31fafc5a528a624d4a1e70cd42663ba48) chore(deps): bump github.com/google/go-cmp from 0.5.7 to 0.5.8 (#1905) + * [25881912](https://github.com/argoproj/argo-events/commit/25881912cae015d33a2cbfa59d8882bb4e7ccf1f) chore(deps): bump cloud.google.com/go/pubsub from 1.20.0 to 1.21.0 (#1904) + * [591c9b2a](https://github.com/argoproj/argo-events/commit/591c9b2ada94ee4da0476f27a02eef97d2a7680f) chore(deps): bump github.com/argoproj/pkg from 0.12.0 to 0.13.0 (#1903) + * [c4b76b7c](https://github.com/argoproj/argo-events/commit/c4b76b7c69b65dd39fc6bc93b753c876ef0db65f) chore(deps): bump github.com/aws/aws-sdk-go from 1.43.45 to 1.44.0 (#1900) + * [ce093b0e](https://github.com/argoproj/argo-events/commit/ce093b0e1f60554738cca169307f913c6ac487a8) chore(deps): bump github.com/aws/aws-sdk-go from 1.43.44 to 1.43.45 (#1898) + * [bdb5ec3f](https://github.com/argoproj/argo-events/commit/bdb5ec3f6a390ac6cbe76964eff7e8cd77c90820) chore(deps): bump github.com/xanzy/go-gitlab from 0.63.0 to 0.64.0 (#1897) + * [d60f9e6a](https://github.com/argoproj/argo-events/commit/d60f9e6a75fe48d2395e2a8124b21cf9bcc594f8) chore(deps): bump github.com/slack-go/slack from 0.10.2 to 0.10.3 (#1896) + * [315a87b7](https://github.com/argoproj/argo-events/commit/315a87b726c3302923ea457b096b06a92d9e9c25) chore(deps): bump google.golang.org/grpc from 1.45.0 to 1.46.0 (#1895) + * [4ea71426](https://github.com/argoproj/argo-events/commit/4ea71426dc173b03a0f68d4631c7b23d5c894d16) Fix a concurrency bug in stress test, plus clean up of e2e tests (#1882) + +### Contributors + + * Aalok Ahluwalia + * AdamKorcz + * Amirio + * Ben Brandt + * Bilal Bakht Ahmad + * Brad Fair + * Christopher Cutajar + * Clément + * Daniel + * David Farr + * Derek Wang + * Eduardo Rodrigues + * GoshaDo + * Harshdeep Singh + * Ian McGraw + * Ivan Babiankou + * Jesse Suen + * Jhonn W. 
Frazão + * Jorge + * Juan Iglesias + * Julie Vogelman + * Matthieu Simon + * Nick Palumbo + * Nir Shtein + * Ole-Martin Bratteng + * Omar Elbanby + * Patrick Marx + * Peter Hoellig + * Petri Kivikangas + * Prema + * Ramin A + * Robert Deusser + * Shyukri Shyukriev + * Sudesh Jethoe + * Thomas Ribeiro de Araújo + * Tom Elliff-O'Shea + * Vaibhav + * Vaibhav Kaushik + * Zubair Haque + * antoniosi + * avasiliev + * dependabot[bot] + * emmayylu + * joepk + * jsvk + * khmjp + * neo + * shirou + +## v1.7.6 (2023-02-09) + + * [eff04343](https://github.com/argoproj/argo-events/commit/eff0434356052ccffdf0b13bef55f861dcde6e30) Update manifests to v1.7.6 + * [d0481ae7](https://github.com/argoproj/argo-events/commit/d0481ae76047c31b04873bafc63d86ab9b1bc32b) feat: Optional kubernetes-based leader election (#2438) + * [413ca1f6](https://github.com/argoproj/argo-events/commit/413ca1f6dfacf878cb7f049e99560baddf1cb4f3) fix: remove the secret watch privilege dependency from js eb ctrler (#2453) + * [0b79a44d](https://github.com/argoproj/argo-events/commit/0b79a44dbd7d7bba2ef20dfe0127c8f867909e9f) chore(deps): bump github.com/xanzy/go-gitlab from 0.78.0 to 0.79.1 (#2460) + * [666a29d6](https://github.com/argoproj/argo-events/commit/666a29d686b037e8fec54527f9b8cbbf785daf75) chore(deps): bump github.com/rabbitmq/amqp091-go from 1.6.0 to 1.6.1 (#2459) + * [ca0fbdb7](https://github.com/argoproj/argo-events/commit/ca0fbdb7bbbd87e0bb04f7d74f1c816301388013) chore(deps): bump google.golang.org/api from 0.108.0 to 0.109.0 (#2456) + * [08e84bd9](https://github.com/argoproj/argo-events/commit/08e84bd92d54365d293bdba98c7b98c9887ea463) chore(deps): bump google.golang.org/grpc from 1.52.0 to 1.52.3 (#2448) + * [c377ea0a](https://github.com/argoproj/argo-events/commit/c377ea0a1881a91e3ce7c84625176691c07bf5d8) chore(deps): bump github.com/antonmedv/expr from 1.10.1 to 1.10.5 (#2446) + +### Contributors + + * David Farr + * Derek Wang + * dependabot[bot] + +## v1.7.5 (2023-01-23) + + * [de0a80e6](https://github.com/argoproj/argo-events/commit/de0a80e6020eee9cfcbd9afc1b6fb0b2479a66ae) Update manifests to v1.7.5 + * [be4ce30e](https://github.com/argoproj/argo-events/commit/be4ce30ecb368fd946bad77adf49fef62055d4dc) chore(deps): bump github.com/gavv/httpexpect/v2 from 2.8.0 to 2.9.0 (#2434) + * [9dfedcc7](https://github.com/argoproj/argo-events/commit/9dfedcc74c3de38c0ec16756acd559ac8a5b58e1) chore(deps): bump github.com/antonmedv/expr from 1.9.0 to 1.10.1 (#2433) + * [a3bec705](https://github.com/argoproj/argo-events/commit/a3bec705ab8167369ec681ec04bc8acc4efb5aee) chore(deps): bump github.com/spf13/viper from 1.14.0 to 1.15.0 (#2432) + * [650e819a](https://github.com/argoproj/argo-events/commit/650e819aa73ebe6cf852ee1c4dec6fff7c87265b) chore(deps): bump github.com/go-swagger/go-swagger from 0.29.0 to 0.30.4 (#2431) + * [481bed6f](https://github.com/argoproj/argo-events/commit/481bed6f1fbd685e50ace73216830dd416b3a72d) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.180 to 1.44.184 (#2429) + * [091e6fd5](https://github.com/argoproj/argo-events/commit/091e6fd55df9b99367f9fd72c2c740eec9b86baa) chore(deps): bump github.com/nats-io/nats.go from 1.22.1 to 1.23.0 (#2428) + * [f584c8f3](https://github.com/argoproj/argo-events/commit/f584c8f3d96d609e4b511a533dcccec736e7a641) chore(deps): bump google.golang.org/api from 0.107.0 to 0.108.0 (#2430) + * [c511457d](https://github.com/argoproj/argo-events/commit/c511457d21df6e0282f8b3fb23d65f5ec27217c5) chore(deps): bump github.com/rabbitmq/amqp091-go from 1.4.0 to 1.6.0 (#2427) + * 
[1a3a79ad](https://github.com/argoproj/argo-events/commit/1a3a79ad84a1ab3944b29a27e518bec6c670cee5) fix: kafka schema registry (#2423) + * [70e3ad0c](https://github.com/argoproj/argo-events/commit/70e3ad0cd375b3b773ada4898aea09e7bc545c2f) Implement optional at least once semantics (#2404) + * [62fb8d1a](https://github.com/argoproj/argo-events/commit/62fb8d1a01585e108fa3d2752573f4407867312c) feat: allow granular secret privileges. additional controller logging/leader-election options (#2411) + * [225b1d12](https://github.com/argoproj/argo-events/commit/225b1d125e64b867fd4cf4988eb5587ef2328cdd) chore(deps): bump github.com/Shopify/sarama from 1.37.0 to 1.38.0 (#2419) + * [08435228](https://github.com/argoproj/argo-events/commit/0843522807564e652a6a17a497e341bd0f5262b5) chore(deps): bump google.golang.org/api from 0.106.0 to 0.107.0 (#2420) + * [9cb1d9d4](https://github.com/argoproj/argo-events/commit/9cb1d9d4840a1408270f2234c5f87814fb3c5d2c) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.46 to 7.0.47 (#2421) + * [1e3a1994](https://github.com/argoproj/argo-events/commit/1e3a1994f453d1f0ce405e26f77c50d48719c35e) chore(deps): bump google.golang.org/grpc from 1.51.0 to 1.52.0 (#2418) + * [663ae056](https://github.com/argoproj/argo-events/commit/663ae0564b65b984edc3f17b74ed2bd1eb64745e) chore(deps): bump github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus from 1.1.3 to 1.1.4 (#2417) + * [3ef04688](https://github.com/argoproj/argo-events/commit/3ef046885e4549fec461712b635f2a6042b95596) chore(deps): bump github.com/nats-io/stan.go from 0.10.3 to 0.10.4 (#2416) + * [e9927635](https://github.com/argoproj/argo-events/commit/e992763550d38e44d7a6b875482569ba9f6ca079) chore(deps): bump github.com/xanzy/go-gitlab from 0.76.0 to 0.78.0 (#2414) + * [d401800a](https://github.com/argoproj/argo-events/commit/d401800a9bf5dc88ef269d5219fe6243f2ac83ec) chore(deps): bump github.com/cloudevents/sdk-go/v2 from 2.11.0 to 2.13.0 (#2415) + * [a435af49](https://github.com/argoproj/argo-events/commit/a435af49b378d1948fcf86bf237f6dab20490a5e) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.175 to 1.44.180 (#2413) + * [327fd63d](https://github.com/argoproj/argo-events/commit/327fd63d63da30d04b089cce08f4f026faf16c79) chore(deps): bump actions/setup-python from 4.4.0 to 4.5.0 (#2412) + * [f545dfb4](https://github.com/argoproj/argo-events/commit/f545dfb4048b4feccfda0a42233c45be51716d0d) fix: cloneDirectory validation on git artifcatory spec (#2407) + * [f60ae2c0](https://github.com/argoproj/argo-events/commit/f60ae2c0a8c2ec29150c88f0580605e90b26fa17) feat(sensor): Kafka Trigger - support avro/schemaRegistry (#2385) + * [c56b2d54](https://github.com/argoproj/argo-events/commit/c56b2d54c157a309685d7abb77a770867933d7ab) chore(deps): bump golang.org/x/crypto from 0.4.0 to 0.5.0 (#2401) + * [ac2cb59f](https://github.com/argoproj/argo-events/commit/ac2cb59fe1f0b50f1bf797975880a114bc0cd95b) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.171 to 1.44.175 (#2400) + * [1b8d5fca](https://github.com/argoproj/argo-events/commit/1b8d5fca97e9dd226705654e9610093ed5312970) chore(deps): bump google.golang.org/api from 0.104.0 to 0.106.0 (#2399) + * [ae5abfc2](https://github.com/argoproj/argo-events/commit/ae5abfc27217773a9583c2cbdb53cc4f1765ea1d) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.45 to 7.0.46 (#2398) + * [0da2dfe3](https://github.com/argoproj/argo-events/commit/0da2dfe3e811a9bfbc9118e65f745dd2abf1072a) NATS event data - add header field (#2396) + * 
[ff524af8](https://github.com/argoproj/argo-events/commit/ff524af8366acaec033998f5692e842ad516a95d) fix: fix bug in evaluation of filters with filtersLogicalOperator=or (#2374) + * [eeae3da9](https://github.com/argoproj/argo-events/commit/eeae3da976f5f735e698f66eb44d06ff9a728251) Implement multiple partions usage in Kafka trigger (#2360) + * [a0ef3c24](https://github.com/argoproj/argo-events/commit/a0ef3c24eb960539a73c157dfdab65f6fcbc8f55) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.162 to 1.44.171 (#2387) + * [c650f05b](https://github.com/argoproj/argo-events/commit/c650f05bdbf82abb54e9e1e5a8af6d26b2f1997f) chore(deps): bump github.com/nats-io/nats.go from 1.21.0 to 1.22.1 (#2381) + * [8565b135](https://github.com/argoproj/argo-events/commit/8565b135fd79220b8457cf13b013e98943c28ab3) chore(deps): bump github.com/slack-go/slack from 0.12.0 to 0.12.1 (#2379) + * [b7ff5c15](https://github.com/argoproj/argo-events/commit/b7ff5c15cf7c0ffee9fc0c3d968a563265520a9e) chore(deps): bump github.com/itchyny/gojq from 0.12.10 to 0.12.11 (#2380) + * [4db58c64](https://github.com/argoproj/argo-events/commit/4db58c64fb0a3b38eb0e511d994e49d1e39794d8) chore(deps): bump actions/setup-python from 4.3.0 to 4.4.0 (#2377) + * [cca6e338](https://github.com/argoproj/argo-events/commit/cca6e33809078cb4849e966d88d6560b9fddab48) chore(deps): bump actions/stale from 6 to 7 (#2378) + * [e2e6de21](https://github.com/argoproj/argo-events/commit/e2e6de214e6a0bf58007f87a417f268c28f1b1fd) fix: Fixed Github Sensor example and minor doc correction (#2373) + * [17f9abe6](https://github.com/argoproj/argo-events/commit/17f9abe6b4823506a375ba73fdc34cfb3e31da51) feat: Expand Slack Trigger Capabilities - blocks,attachments,threads and more (#2369) + * [6a0ef35a](https://github.com/argoproj/argo-events/commit/6a0ef35af79e9bba59e77768efb3f3b438b7f668) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.157 to 1.44.162 (#2368) + * [46f16e53](https://github.com/argoproj/argo-events/commit/46f16e53e0d0e9ca76f79e0ed985b1cfae2c4c1d) chore(deps): bump cloud.google.com/go/compute/metadata from 0.2.2 to 0.2.3 (#2366) + * [957aecdb](https://github.com/argoproj/argo-events/commit/957aecdb5189e4c70b8d102cd18eba0facdfcc40) chore(deps): bump github.com/gavv/httpexpect/v2 from 2.7.0 to 2.8.0 (#2365) + * [8037c502](https://github.com/argoproj/argo-events/commit/8037c502283418ba0188a5991d8ba500c2e7f51e) chore(deps): bump github.com/slack-go/slack from 0.11.2 to 0.12.0 (#2364) + * [4f4cce93](https://github.com/argoproj/argo-events/commit/4f4cce933272e33db93746867518184e28904098) chore(deps): bump actions/setup-go from 3.4.0 to 3.5.0 (#2361) + * [1832666b](https://github.com/argoproj/argo-events/commit/1832666b6292582cd9d8ba421455e53773a89744) Make kafka eventsource compatible with samara 1.37.0 Addresses #2358 (#2359) + * [b8598982](https://github.com/argoproj/argo-events/commit/b859898290c2c044a9f96b297f887121cc8a60ee) fix: typo in gitlab example eventsource (#2353) + +### Contributors + + * Bilal Bakht Ahmad + * Brad Fair + * Daniel + * Derek Wang + * Jesse Suen + * Jhonn W. 
Frazão + * Nick Palumbo + * Ramin A + * Thomas Ribeiro de Araújo + * dependabot[bot] + * jsvk + +## v1.7.4 (2022-12-11) + + * [06409b6e](https://github.com/argoproj/argo-events/commit/06409b6e9a7c9644efbac248f48c276e5f12b51f) Update manifests to v1.7.4 + * [5ad08812](https://github.com/argoproj/argo-events/commit/5ad08812278e5eb3f090c4f38bb44be9f06fbdaf) chore(deps): bump github.com/nats-io/nats.go from 1.20.0 to 1.21.0 (#2351) + * [bbcfce51](https://github.com/argoproj/argo-events/commit/bbcfce51a772168f149e437b350bf6580557c8b8) chore(deps): bump google.golang.org/api from 0.103.0 to 0.104.0 (#2350) + * [89c545a1](https://github.com/argoproj/argo-events/commit/89c545a1e0ae2d0bb912fe0661451644255351d7) chore(deps): bump github.com/Azure/azure-event-hubs-go/v3 from 3.3.19 to 3.4.0 (#2347) + * [0b2b5837](https://github.com/argoproj/argo-events/commit/0b2b5837e1d94ed76838592cad4b47b3d12cc924) chore(deps): bump golang.org/x/crypto from 0.3.0 to 0.4.0 (#2348) + * [8887cbdc](https://github.com/argoproj/argo-events/commit/8887cbdc9c9b082427ede2069757673648569ec1) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.152 to 1.44.157 (#2349) + * [26c173a5](https://github.com/argoproj/argo-events/commit/26c173a51b876b04bffb7c451b570131a2cc83dc) chore(deps): bump github.com/ktrysmt/go-bitbucket from 0.9.48 to 0.9.55 (#2346) + * [c7e185be](https://github.com/argoproj/argo-events/commit/c7e185be1041e39dd5a8ffdf2a9b5c26705e4124) chore(deps): bump cloud.google.com/go/pubsub from 1.27.1 to 1.28.0 (#2345) + * [ec808411](https://github.com/argoproj/argo-events/commit/ec808411c21d90e49e2d6f45b56e37fe53da7516) chore(deps): bump github.com/xdg-go/scram from 1.1.1 to 1.1.2 (#2343) + * [57bf8fd9](https://github.com/argoproj/argo-events/commit/57bf8fd95ed1d49ab4c358ed16b1d850886afef2) chore(deps): bump github.com/gavv/httpexpect/v2 from 2.6.1 to 2.7.0 (#2344) + * [f4d95c9f](https://github.com/argoproj/argo-events/commit/f4d95c9f76c57aef8a14189de5d491e5cd053101) chore(deps): bump go.uber.org/zap from 1.21.0 to 1.24.0 (#2335) + * [50009e84](https://github.com/argoproj/argo-events/commit/50009e84cc53f62694bebf23789c42d6daed128b) chore(deps): bump github.com/Masterminds/sprig/v3 from 3.2.0 to 3.2.3 (#2339) + * [3d36372a](https://github.com/argoproj/argo-events/commit/3d36372a4e68c2b22b68bb24ed4f2e4a601d4d33) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.44 to 7.0.45 (#2336) + * [f0759423](https://github.com/argoproj/argo-events/commit/f0759423a386a303a3342cbaec9f90d0e2a9b4de) chore(deps): bump cloud.google.com/go/pubsub from 1.26.0 to 1.27.1 (#2337) + * [366a6519](https://github.com/argoproj/argo-events/commit/366a651997b3a56d4a27a9d898fab92372657b49) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.145 to 1.44.152 (#2338) + * [0fc873bd](https://github.com/argoproj/argo-events/commit/0fc873bd7e899db67c423be91bb4a98b0b7496c5) chore(deps): bump cloud.google.com/go/compute/metadata from 0.2.1 to 0.2.2 (#2334) + * [275ee246](https://github.com/argoproj/argo-events/commit/275ee24685f6459ffa6112fa730afe349efcf493) chore(deps): bump github.com/itchyny/gojq from 0.12.9 to 0.12.10 (#2332) + * [55b59e55](https://github.com/argoproj/argo-events/commit/55b59e5559367a101170638d859ef6ef616f1d9b) chore(deps): bump actions/setup-go from 3.3.1 to 3.4.0 (#2331) + * [69a5dd69](https://github.com/argoproj/argo-events/commit/69a5dd692e76eb643408964ae069aed44cd42b80) feat: Azure Service Bus as a Trigger (#2280) + * [6f14c946](https://github.com/argoproj/argo-events/commit/6f14c9460a1f85b25cb35b8c81a156fc5f44abe1) feat: 
Enable adding customized logging fields in sensor (#2325) + * [7aa2d0a6](https://github.com/argoproj/argo-events/commit/7aa2d0a6a9216b863b7b98ae76928f21cada85db) feat: Support non-string parameters. Closes #1236 (#2317) + * [9e467ff0](https://github.com/argoproj/argo-events/commit/9e467ff03a55c034a26ae2ee3e56ac7bd06d67fb) [issue-1863] username and password auth support for mqtt eventsource (#2324) + * [9f6559ef](https://github.com/argoproj/argo-events/commit/9f6559ef64b5bd2cefe177060dd913229a19cd19) chore(deps): bump github.com/tidwall/gjson from 1.14.3 to 1.14.4 (#2327) + * [87a0cb74](https://github.com/argoproj/argo-events/commit/87a0cb741a5f6da7f964f5883b53e9f74c925724) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.136 to 1.44.145 (#2330) + * [b3ff585c](https://github.com/argoproj/argo-events/commit/b3ff585ceb6b49cee305bcae8e2cafe63ded69b7) chore(deps): bump github.com/gavv/httpexpect/v2 from 2.4.1 to 2.6.1 (#2326) + * [1bf92af6](https://github.com/argoproj/argo-events/commit/1bf92af6b1e08c9b027044c1a1848e4488dfb54b) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.42 to 7.0.44 (#2329) + * [01f1eee6](https://github.com/argoproj/argo-events/commit/01f1eee63b0901aca0f0ea4b3f33adc498fb0a38) chore(deps): bump github.com/xanzy/go-gitlab from 0.75.0 to 0.76.0 (#2328) + * [a4ac2053](https://github.com/argoproj/argo-events/commit/a4ac2053f43fee70efd394428784988ce6b374fa) chore(deps): bump github.com/xanzy/go-gitlab from 0.70.0 to 0.75.0 (#2320) + * [5b0bb312](https://github.com/argoproj/argo-events/commit/5b0bb31262a25ae52f4893495ad187ebbe9ce838) chore(deps): bump github.com/gavv/httpexpect/v2 from 2.2.0 to 2.4.1 (#2322) + * [9c95343e](https://github.com/argoproj/argo-events/commit/9c95343e0929441f529a30783ff2a4f0c9cdc1ec) chore(deps): bump github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus from 1.1.2 to 1.1.3 (#2319) + * [3cc01b92](https://github.com/argoproj/argo-events/commit/3cc01b924cb9d5c023737e500be260e3b22f2bed) eventbus controller: move fuzzer from cncf-fuzzing (#2314) + * [4c944cc7](https://github.com/argoproj/argo-events/commit/4c944cc7bb007380a3022d5ae8af167478428c48) chore(deps): bump google.golang.org/api from 0.100.0 to 0.103.0 (#2310) + * [fe34f4d2](https://github.com/argoproj/argo-events/commit/fe34f4d257ff1e958b2ff802cbae6acc457a730f) chore(deps): bump github.com/nats-io/nats.go from 1.19.1 to 1.20.0 (#2311) + * [358419fd](https://github.com/argoproj/argo-events/commit/358419fd0c0d3796ada2a8c2b126c3cfe2eff305) chore(deps): bump github.com/spf13/viper from 1.12.0 to 1.14.0 (#2307) + * [b2599991](https://github.com/argoproj/argo-events/commit/b259999132bceb7ab906619276853532263dbfd7) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.126 to 1.44.136 (#2309) + * [73316f3c](https://github.com/argoproj/argo-events/commit/73316f3c6a82056e11ef14adf18838792bab13b1) chore(deps): bump github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus from 1.1.1 to 1.1.2 (#2306) + * [33d45181](https://github.com/argoproj/argo-events/commit/33d45181f2ec20637d6755175cce9606a9e5f571) chore(deps): bump github.com/prometheus/client_golang from 1.13.1 to 1.14.0 (#2308) + * [bda9941c](https://github.com/argoproj/argo-events/commit/bda9941cc850d0c966ee2dfc90c20bf64bea9654) fix: payload serialization in sensor. 
Fixes #2272 (#2273) + * [8dd87ffb](https://github.com/argoproj/argo-events/commit/8dd87ffb3148a8795d59a44f16e37565e1f0fba9) fix: if key/value store already exists use that (#2293) + * [053b55f4](https://github.com/argoproj/argo-events/commit/053b55f4cf2ff42b523aa0d79f9fc9d39c3c4eeb) chore(deps): bump github.com/nats-io/nats.go from 1.19.0 to 1.19.1 (#2300) + * [a386d420](https://github.com/argoproj/argo-events/commit/a386d42046f19eefc1d6f6e369a968dd55fc523d) chore(deps): bump cloud.google.com/go/compute/metadata from 0.1.0 to 0.2.1 (#2303) + * [cf7339aa](https://github.com/argoproj/argo-events/commit/cf7339aa46c237e29ee82c38a82b34bbe9ce1867) chore(deps): bump dependabot/fetch-metadata from 1.3.4 to 1.3.5 (#2294) + * [db85de3e](https://github.com/argoproj/argo-events/commit/db85de3e3e5d9bbefbf3405de11b25957af44fd2) chore(deps): bump github.com/prometheus/client_golang from 1.13.0 to 1.13.1 (#2295) + * [261af2bd](https://github.com/argoproj/argo-events/commit/261af2bd90840034f042848f270a538499d94662) fix(docs): context filter documentation (#2277) + * [60fe12fc](https://github.com/argoproj/argo-events/commit/60fe12fcd3601e83f8e871ded4128a500d310387) chore(deps): bump cloud.google.com/go/compute from 1.10.0 to 1.12.1 (#2284) + * [94a5b775](https://github.com/argoproj/argo-events/commit/94a5b7754695d818da5ec23b79060fac93ed2f1a) chore(deps): bump github.com/spf13/cobra from 1.6.0 to 1.6.1 (#2283) + * [d4ba8666](https://github.com/argoproj/argo-events/commit/d4ba8666dcf5ed242374e663392a57b207a5509c) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.121 to 1.44.126 (#2289) + * [96b7fd4e](https://github.com/argoproj/argo-events/commit/96b7fd4e37e7e8e15ce8b353ac8e3a3afdb814cd) chore(deps): bump cloud.google.com/go/pubsub from 1.24.0 to 1.26.0 (#2288) + * [af10cd6c](https://github.com/argoproj/argo-events/commit/af10cd6c2aa2086b5194cfa9c2a9750fae280857) chore(deps): bump github.com/nats-io/nats.go from 1.18.0 to 1.19.0 (#2285) + * [f9ce3d98](https://github.com/argoproj/argo-events/commit/f9ce3d984e024ed06e61f1f9b6b132055b63b4d6) chore(deps): bump github.com/stretchr/testify from 1.8.0 to 1.8.1 (#2286) + * [7b92eb93](https://github.com/argoproj/argo-events/commit/7b92eb93ae42574e2d610f87e4870262b47c5009) chore(deps): bump actions/setup-go from 3.2.0 to 3.3.1 (#2271) + * [3e5ad7e0](https://github.com/argoproj/argo-events/commit/3e5ad7e06bd4b582b15d9cb252e90181c0eeb229) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.41 to 7.0.42 (#2269) + * [76d04380](https://github.com/argoproj/argo-events/commit/76d0438066b385751fb36a4c09829e43afa2c32d) chore(deps): bump github.com/Azure/azure-event-hubs-go/v3 from 3.3.18 to 3.3.19 (#2270) + * [336f1bf4](https://github.com/argoproj/argo-events/commit/336f1bf47b195571def3b81007730db9881fb719) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.116 to 1.44.121 (#2267) + * [c12f81b1](https://github.com/argoproj/argo-events/commit/c12f81b19b9dc4ea4358f7c8f6734f25c80076a7) feat: update third_party dependencies (#2245) + * [f25d2b05](https://github.com/argoproj/argo-events/commit/f25d2b058a389de7465c6bbc4db071af17e1ae3c) chore(deps): bump google.golang.org/api from 0.98.0 to 0.99.0 (#2259) + * [6993fed6](https://github.com/argoproj/argo-events/commit/6993fed6cc0175ef082975057da2d6ca1b1ffbfe) chore(deps): bump actions/setup-python from 4.2.0 to 4.3.0 (#2260) + * [3fa45d20](https://github.com/argoproj/argo-events/commit/3fa45d209b8262696bb9db17ac63269c124c2a07) chore(deps): bump github.com/fsnotify/fsnotify from 1.5.4 to 1.6.0 (#2258) + * 
[2ece19b2](https://github.com/argoproj/argo-events/commit/2ece19b2331771649070e3e02fa670068b0b8c9d) chore(deps): bump github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus from 1.1.0 to 1.1.1 (#2256) + * [5a12e7c6](https://github.com/argoproj/argo-events/commit/5a12e7c6f6af237c08e21a62b8310d666008ce3f) chore(deps): bump github.com/spf13/cobra from 1.5.0 to 1.6.0 (#2255) + * [21fd5b6b](https://github.com/argoproj/argo-events/commit/21fd5b6b5c3fd7d12f3b6e09ff9ef51777298563) chore(deps): bump google.golang.org/grpc from 1.50.0 to 1.50.1 (#2254) + * [e7b48445](https://github.com/argoproj/argo-events/commit/e7b484459f6543f84fbc38068083dda8b70f24fd) chore(deps): bump github.com/nats-io/nats.go from 1.17.0 to 1.18.0 (#2253) + * [c6f480cb](https://github.com/argoproj/argo-events/commit/c6f480cbf5982eeaba9106b5e06ddf4a82672cfc) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.114 to 1.44.116 (#2251) + * [5be8d1c1](https://github.com/argoproj/argo-events/commit/5be8d1c1f225c110b89c2ace902a61f91727a612) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.40 to 7.0.41 (#2252) + * [b2162be3](https://github.com/argoproj/argo-events/commit/b2162be303216f3977fffd50d02c314d995dcffb) fix(docs): partition as optional field for kafka eventsource fixes: #1502 (#2246) + * [3a0fb22e](https://github.com/argoproj/argo-events/commit/3a0fb22e2832f4b263db85b813a2428be9f68920) feat: Revision History Limit for sensor. Closes #1786 (#2244) + * [314f07ba](https://github.com/argoproj/argo-events/commit/314f07bace734d9d464a5ff413ecc7edbd9629ce) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.39 to 7.0.40 (#2241) + * [47d20f86](https://github.com/argoproj/argo-events/commit/47d20f864d3a5480ae45cdd696e8fb36a2133b1f) chore(deps): bump google.golang.org/grpc from 1.49.0 to 1.50.0 (#2240) + * [3278a094](https://github.com/argoproj/argo-events/commit/3278a09480d454fe7c1726a0c4368614884888cf) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.109 to 1.44.114 (#2239) + * [103675a5](https://github.com/argoproj/argo-events/commit/103675a575be57c1c85d16ec92d07112baaeface) feat: Azure Service Bus as EventSource (#2229) + * [c5e5cfc4](https://github.com/argoproj/argo-events/commit/c5e5cfc442fa43c6c5dc33987dcda36e2f438408) Property name typo in Expr filter documentation (#2231) + * [3ff81754](https://github.com/argoproj/argo-events/commit/3ff81754d89aa44dd78014f57a06bb6b8a937dee) chore(deps): bump dependabot/fetch-metadata from 1.3.3 to 1.3.4 (#2227) + * [c1af4fc6](https://github.com/argoproj/argo-events/commit/c1af4fc63702395eeb76fb807c82f98ff9921f1b) chore(deps): bump google.golang.org/api from 0.97.0 to 0.98.0 (#2226) + * [05b5764a](https://github.com/argoproj/argo-events/commit/05b5764a98b763be3b7e6f85de34b42be7c16f16) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.105 to 1.44.109 (#2224) + * [d5632222](https://github.com/argoproj/argo-events/commit/d563222249bb19dc387c3da381746ab17d195506) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.35 to 7.0.39 (#2225) + * [d4f53b67](https://github.com/argoproj/argo-events/commit/d4f53b674176421d5f07a5af696bdd5f967aacad) chore(deps): bump github.com/Shopify/sarama from 1.35.0 to 1.37.0 (#2223) + +### Contributors + + * Aalok Ahluwalia + * AdamKorcz + * Derek Wang + * GoshaDo + * Jorge + * Julie Vogelman + * Ole-Martin Bratteng + * Peter Hoellig + * Prema + * dependabot[bot] + * emmayylu + * joepk + +## v1.7.3 (2022-09-28) + + * [c11f9147](https://github.com/argoproj/argo-events/commit/c11f91471077d781950451dbd41096b2f3d65d94) Update manifests to v1.7.3 
+ * [7e6a9a57](https://github.com/argoproj/argo-events/commit/7e6a9a57e8def499c301806ef6b2f64379f6ea31) Make port configurable in webhook, default is set to 443 (#2215) + * [8441b643](https://github.com/argoproj/argo-events/commit/8441b643a8bd87a8f2c30403ce6af93ddcc4d927) feat: Kafka es discontinues processing if eb publishing fails (#2214) + * [63d40612](https://github.com/argoproj/argo-events/commit/63d40612789bac17a80a3ff07b76632865529d19) chore(deps): bump actions/stale from 5 to 6 (#2213) + * [688259f7](https://github.com/argoproj/argo-events/commit/688259f780eda92dd5ea1a1d79a92f990d0eff81) chore(deps): bump google.golang.org/api from 0.93.0 to 0.97.0 (#2211) + * [179cc075](https://github.com/argoproj/argo-events/commit/179cc075344f89233ecf8b540235ffa04d9db3e0) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.81 to 1.44.105 (#2208) + * [d3d22376](https://github.com/argoproj/argo-events/commit/d3d223767a9a9d254e19867b83d1a55e134f15e2) fix filter for github sensor example (#2188) + * [1c68b590](https://github.com/argoproj/argo-events/commit/1c68b590b61e667b69fb7640e0227e698f641ab3) fix: Emissary executor reads and writes to WorkflowTaskResults, not Pods (#2189) + * [3ea1f575](https://github.com/argoproj/argo-events/commit/3ea1f5757b424d2da7d7ee65ce70a39c4ffd9ff7) chore(deps): bump github.com/nats-io/nats.go from 1.16.0 to 1.17.0 (#2197) + +### Contributors + + * Derek Wang + * Julie Vogelman + * Matthieu Simon + * Nir Shtein + * dependabot[bot] + +## v1.7.2 (2022-09-12) + + * [1d3877a4](https://github.com/argoproj/argo-events/commit/1d3877a40bac56a9331d7c6fd15d1d9aa9e5e83d) Update manifests to v1.7.2 + * [2ecd6a70](https://github.com/argoproj/argo-events/commit/2ecd6a700c5de68bc2066fa903945723c8af596d) feat: Webhook event source to support filtering (#2178) + * [f7bfd1c5](https://github.com/argoproj/argo-events/commit/f7bfd1c5d30b637f7c9225e26a50dddc06b2cd60) fix: dependency should use % (#2175) + * [8d611286](https://github.com/argoproj/argo-events/commit/8d6112863e8692ec990a6ad5c70245f8353b3871) fix: error is swallowed after retry failure (#2160) + * [985e7481](https://github.com/argoproj/argo-events/commit/985e7481532f81cd459693dec9d1968a299823ae) feat: Kafka eventsource supports Sarama config customization (#2161) + * [e52ad749](https://github.com/argoproj/argo-events/commit/e52ad749397ef675b69c8eeb4aa7bcb2dbf4c283) feat: retry failed eventbus message publishing (#2162) + * [e0bae390](https://github.com/argoproj/argo-events/commit/e0bae39016d06dbf483f8d6791d6904e0638a1dd) feat: Add option to configure NATS max_payload in JetStream eventbus (#2164) + * [82e0ae1c](https://github.com/argoproj/argo-events/commit/82e0ae1cf9aafc3bfe2255659368ff582f3a4362) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.34 to 7.0.35 (#2167) + * [2fdb0b13](https://github.com/argoproj/argo-events/commit/2fdb0b135cec22ccc12b31ce014197d506c809c1) chore(deps): bump github.com/itchyny/gojq from 0.12.8 to 0.12.9 (#2165) + * [73755456](https://github.com/argoproj/argo-events/commit/73755456b4e1c2d36473a5ddf1060c4b77569b09) fix: Increase random sleep range in Bitbucket eventsource (#2148) + * [a82ccd31](https://github.com/argoproj/argo-events/commit/a82ccd3128b7d357d1a0968933a7fdf357ed0bc4) chore(deps): bump google.golang.org/grpc from 1.48.0 to 1.49.0 (#2149) + * [5d102a2a](https://github.com/argoproj/argo-events/commit/5d102a2a7e7afdc9b0b1b1f285cffe8ef39b1e38) solved deleteHookOnFinish race condition + removed hooks daemon mitigation (#2145) + * 
[f31cd46e](https://github.com/argoproj/argo-events/commit/f31cd46ed5dc4107f2dfca1e3f2cfb803f572ea0) chore(deps): bump github.com/tidwall/gjson from 1.14.2 to 1.14.3 (#2143) + * [91a12483](https://github.com/argoproj/argo-events/commit/91a12483ebe99fddf36a01d8f204aa417e8cb1eb) chore(deps): bump google.golang.org/api from 0.91.0 to 0.93.0 (#2144) + * [78044d66](https://github.com/argoproj/argo-events/commit/78044d66ef40b5b4c08fe726e36e0292549e1219) feat: AWS Temporary credential support for SQS eventsource (#2092) + * [a1df80c1](https://github.com/argoproj/argo-events/commit/a1df80c1863831dba07a95e0eecd2ac8cc869a60) chore(deps): bump github.com/cloudevents/sdk-go/v2 from 2.10.1 to 2.11.0 (#2133) + * [f85d207c](https://github.com/argoproj/argo-events/commit/f85d207c92fb11e48bc732b25aaa28a1f84e9287) chore(deps): bump cloud.google.com/go/compute from 1.7.0 to 1.9.0 (#2142) + * [7e6d4e6e](https://github.com/argoproj/argo-events/commit/7e6d4e6e20ef4af8e7bc3972b76c662d1754e294) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.70 to 1.44.81 (#2141) + * [081ac2f8](https://github.com/argoproj/argo-events/commit/081ac2f821b4037db1cea782ed156487eed63b10) fix: added controller config validation (#2103) + * [ea023697](https://github.com/argoproj/argo-events/commit/ea0236970ecbf4be446555eedc6cf856b247b997) feat: submit from workflowtemplate (#2120) + * [cd23dde5](https://github.com/argoproj/argo-events/commit/cd23dde5ea0eb0b2e66b8059ec5e7a9ef9239f2c) chore(deps): bump google.golang.org/api from 0.90.0 to 0.91.0 (#2122) + * [9f8ffe91](https://github.com/argoproj/argo-events/commit/9f8ffe9100f1a322826f133bd4b349b0e6ae80e7) chore(deps): bump actions/setup-python from 4.1.0 to 4.2.0 (#2129) + * [7821d9c2](https://github.com/argoproj/argo-events/commit/7821d9c2b072d91da8997f3939f98cc921a060b9) chore(deps): bump github.com/tidwall/gjson from 1.14.1 to 1.14.2 (#2123) + * [a0648352](https://github.com/argoproj/argo-events/commit/a064835243c5bf096b6d1a263b8253b07da3da75) chore(deps): bump github.com/prometheus/client_golang (#2124) + * [b126982d](https://github.com/argoproj/argo-events/commit/b126982d0d1b17ff8a09a9adafb180f7276de175) chore(deps): bump github.com/xanzy/go-gitlab from 0.69.0 to 0.70.0 (#2128) + * [c1d99913](https://github.com/argoproj/argo-events/commit/c1d999137d43018a4b0282db41cc68e95f0f57ce) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.61 to 1.44.70 (#2125) + * [ecd934e9](https://github.com/argoproj/argo-events/commit/ecd934e9f9cdd316ae54712dc0daed474515604f) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.32 to 7.0.34 (#2126) + * [f40ebc69](https://github.com/argoproj/argo-events/commit/f40ebc69ed01263e6813370619f4dd1be5fd5bdd) chore(deps): bump github.com/nats-io/stan.go from 0.10.2 to 0.10.3 (#2119) + * [500ff894](https://github.com/argoproj/argo-events/commit/500ff894f6109526a335f9551d187527326b18c7) chore(deps): bump google.golang.org/api from 0.88.0 to 0.90.0 (#2117) + * [f0ce5266](https://github.com/argoproj/argo-events/commit/f0ce526610594d2d838fb8827985d4307d49e5b8) chore(deps): bump github.com/slack-go/slack from 0.11.1 to 0.11.2 (#2118) + * [697f49b7](https://github.com/argoproj/argo-events/commit/697f49b73fd239ed485f5cd70ccf7f4bc757245b) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.31 to 7.0.32 (#2115) + * [a3c936c2](https://github.com/argoproj/argo-events/commit/a3c936c2a30b94c6bf4e3d6c5f5679c1e87e8daf) fix: CVE-2022-1996 (#2111) + * [47e74b54](https://github.com/argoproj/argo-events/commit/47e74b5468bed40ab0f6f6223bc2bb24355dbecd) fix: return error 
in case param value resolving fails (#2112) + * [ceeede1a](https://github.com/argoproj/argo-events/commit/ceeede1a3a07dff68f1833cddd7e49a1db464351) fix: add SCRAM functionality in Kafka triggers, SCRAM-SHA512/256 SASL (#2087) (#2091) + * [e021d48a](https://github.com/argoproj/argo-events/commit/e021d48a8847080489f72f0471d550a2645fbf7f) fix: update bitbucket es example yaml (#2113) + * [b08721f3](https://github.com/argoproj/argo-events/commit/b08721f3a2e66fa6ce29403434ba970553594d10) chore(deps): bump github.com/Shopify/sarama from 1.34.1 to 1.35.0 (#2100) + * [2b718c89](https://github.com/argoproj/argo-events/commit/2b718c8991b88ad2372beaefc11f51746739f3a7) chore(deps): bump github.com/rabbitmq/amqp091-go from 1.3.4 to 1.4.0 (#2099) + * [684ed67c](https://github.com/argoproj/argo-events/commit/684ed67c158fcc639d5aad18393615771322163d) fix: Removed usage of projectKey field in bitbucket eventsource (#2109) + * [b1d449c1](https://github.com/argoproj/argo-events/commit/b1d449c10ac3fec87b1f4275ab3e97349a75dc4f) chore(deps): bump github.com/slack-go/slack from 0.11.0 to 0.11.1 (#2097) + * [5e35f18a](https://github.com/argoproj/argo-events/commit/5e35f18a0f78a51efe54c58de1ca19aed24f712f) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.56 to 1.44.61 (#2098) + * [227a2db7](https://github.com/argoproj/argo-events/commit/227a2db7e60279007fe035fdb79c2112dbd7399b) fix: flaky e2e test cases (#2102) + * [1ba564d1](https://github.com/argoproj/argo-events/commit/1ba564d173e4900f75d05f4ee1bd1f1b37ad66be) chore(deps): bump google.golang.org/api from 0.87.0 to 0.88.0 (#2096) + * [62b1870d](https://github.com/argoproj/argo-events/commit/62b1870de9670e008cdbe8680e173a1ce8841dbf) chore(deps): bump cloud.google.com/go/pubsub from 1.23.1 to 1.24.0 (#2095) + * [8c715122](https://github.com/argoproj/argo-events/commit/8c715122586ece3bc6bc38f4ba8be842fd4862d6) feat: Make max request payload size configurable in all git/webhook related eventsources (#2093) + * [156de21b](https://github.com/argoproj/argo-events/commit/156de21b9473a6acfe30a2311c51aabeb55b31b8) chore(deps): bump google.golang.org/grpc from 1.47.0 to 1.48.0 (#2079) + * [ae9611db](https://github.com/argoproj/argo-events/commit/ae9611dbd5d1ffb278266190c906ade4a451ae73) chore(deps): bump github.com/xanzy/go-gitlab from 0.68.2 to 0.69.0 (#2082) + * [ad9d638f](https://github.com/argoproj/argo-events/commit/ad9d638f77e908eb1b70a1175e75fda6ecc225e6) chore(deps): bump google.golang.org/api from 0.86.0 to 0.87.0 (#2080) + * [736a5c7c](https://github.com/argoproj/argo-events/commit/736a5c7ca1e939663c8cf10f82db0c7f7325976e) chore(deps): bump github.com/bradleyfalzon/ghinstallation/v2 (#2081) + * [a3cad702](https://github.com/argoproj/argo-events/commit/a3cad7024a74b00c015b091aeb73399007a68c4a) chore(deps): bump actions/setup-python from 4.0.0 to 4.1.0 (#2084) + * [c81d2b17](https://github.com/argoproj/argo-events/commit/c81d2b178698f4b85bade422e18ca8289e20ac60) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.51 to 1.44.56 (#2083) + * [f10eeb90](https://github.com/argoproj/argo-events/commit/f10eeb904688a7814da3fece34b1562c258b698b) Update quick_start.md (#2072) + * [5f140239](https://github.com/argoproj/argo-events/commit/5f1402395925f894572d70976b575764f5a90d38) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.30 to 7.0.31 (#2071) + * [fedb5359](https://github.com/argoproj/argo-events/commit/fedb53590cc5d107359be605bdf4a8bb9bb12989) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.47 to 1.44.51 (#2070) + * 
[b8952b28](https://github.com/argoproj/argo-events/commit/b8952b28717768c5d60ac979365178f84e5bf65f) feat: Add support for custom SNS endpoint (#2067) + * [6686429d](https://github.com/argoproj/argo-events/commit/6686429d7db5d1c2c94bc3a489907973a459e753) chore(deps): bump cloud.google.com/go/pubsub from 1.23.0 to 1.23.1 (#2061) + * [dd3a18df](https://github.com/argoproj/argo-events/commit/dd3a18dfaa14416aa4f9f0502f16ba3bb9a74666) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.29 to 7.0.30 (#2065) + * [b2037fb3](https://github.com/argoproj/argo-events/commit/b2037fb3d875266a23216db6864e81fc63616ca6) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.42 to 1.44.47 (#2064) + * [ead25fb5](https://github.com/argoproj/argo-events/commit/ead25fb5a1a50f2f0188270ad15a3038acb43e6b) chore(deps): bump dependabot/fetch-metadata from 1.3.1 to 1.3.3 (#2066) + * [4d2eb11b](https://github.com/argoproj/argo-events/commit/4d2eb11bd07daddcbef6e187767b80f9d197266f) chore(deps): bump github.com/xanzy/go-gitlab from 0.68.0 to 0.68.2 (#2063) + * [524e8f50](https://github.com/argoproj/argo-events/commit/524e8f50b536adbfb5c913cecffb39a43a357b56) chore(deps): bump github.com/stretchr/testify from 1.7.5 to 1.8.0 (#2062) + * [4412f22e](https://github.com/argoproj/argo-events/commit/4412f22e8756ddc4a48d8945434e9dfb38b8108f) chore(deps): bump google.golang.org/api from 0.85.0 to 0.86.0 (#2060) + * [8070dfca](https://github.com/argoproj/argo-events/commit/8070dfcabf149e6adb7218c629a5929677330916) chore(deps): bump cloud.google.com/go/pubsub from 1.22.2 to 1.23.0 (#2048) + * [d7d86ba9](https://github.com/argoproj/argo-events/commit/d7d86ba9a05637657bfd759304a0ebfd0f95dd77) chore(deps): bump github.com/argoproj/pkg from 0.13.3 to 0.13.6 (#2052) + * [0d623a9a](https://github.com/argoproj/argo-events/commit/0d623a9af33973cff194ace3b6a5fd45e0ad1083) chore(deps): bump github.com/stretchr/testify from 1.7.2 to 1.7.5 (#2053) + * [8d19da79](https://github.com/argoproj/argo-events/commit/8d19da7964a142b1e1a32966439a3af27b46e4a7) chore(deps): bump google.golang.org/api from 0.84.0 to 0.85.0 (#2051) + * [723d6ce5](https://github.com/argoproj/argo-events/commit/723d6ce59844bd593eac9d571f270c2503beee17) chore(deps): bump github.com/spf13/cobra from 1.4.0 to 1.5.0 (#2050) + * [062e314a](https://github.com/argoproj/argo-events/commit/062e314ae631d718260c7f88d9ec2804fd07d820) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.37 to 1.44.42 (#2049) + * [fb5f6458](https://github.com/argoproj/argo-events/commit/fb5f6458a2d14baf301ec7c5d4f562e749081a7a) chore(deps): bump cloud.google.com/go/compute from 1.6.1 to 1.7.0 (#2039) + * [d6da1573](https://github.com/argoproj/argo-events/commit/d6da15731727e52215b82ffbb6d63531fe543eaf) chore(deps): bump github.com/ktrysmt/go-bitbucket from 0.9.47 to 0.9.48 (#2041) + * [bf33d677](https://github.com/argoproj/argo-events/commit/bf33d677062ab8deaaeb2d6945bb66cabf669fbd) chore(deps): bump github.com/argoproj/pkg from 0.13.1 to 0.13.3 (#2037) + * [b9f93179](https://github.com/argoproj/argo-events/commit/b9f93179dc9ead3c600ddd4105940e920127c8a3) chore(deps): bump github.com/slack-go/slack from 0.10.3 to 0.11.0 (#2040) + * [814464b1](https://github.com/argoproj/argo-events/commit/814464b1164c039f401c3b2f9e8599f86dcf3d3b) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.27 to 7.0.29 (#2038) + * [98a6e843](https://github.com/argoproj/argo-events/commit/98a6e84384b2150911f889bb837ae3c63546174a) chore(deps): bump google.golang.org/api from 0.83.0 to 0.84.0 (#2035) + * 
[23d31528](https://github.com/argoproj/argo-events/commit/23d31528c85540874282b5f5a05f6d3eb370b822) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.32 to 1.44.37 (#2036) + * [5e7cc1f3](https://github.com/argoproj/argo-events/commit/5e7cc1f344f321f7015dd1abe94549d36a100364) feat: Added multiple repos support for bitbucket eventsource (#2031) + * [7a221484](https://github.com/argoproj/argo-events/commit/7a22148412a8091c0d7bda3ff1d40beb9262c8e7) chore(deps): bump github.com/cloudevents/sdk-go/v2 from 2.10.0 to 2.10.1 (#2027) + * [fa547708](https://github.com/argoproj/argo-events/commit/fa54770859feeb51a005e4eb26331657f9338716) chore(deps): bump google.golang.org/api from 0.82.0 to 0.83.0 (#2029) + * [25c8e12e](https://github.com/argoproj/argo-events/commit/25c8e12eed0c8618099eb8745174912c4268f769) chore(deps): bump actions/setup-python from 3.1.2 to 4.0.0 (#2030) + * [04b189e1](https://github.com/argoproj/argo-events/commit/04b189e14b79c6bdeb97c0cfe4634a311335a8c0) chore(deps): bump github.com/Shopify/sarama from 1.34.0 to 1.34.1 (#2028) + * [b68fbeeb](https://github.com/argoproj/argo-events/commit/b68fbeeb90523aab59f11307b72052bffae0a2dd) chore(deps): bump github.com/eclipse/paho.mqtt.golang (#2023) + * [d1266e17](https://github.com/argoproj/argo-events/commit/d1266e171e98c24cb3971f462cf1d9619db70c1d) chore(deps): bump github.com/ktrysmt/go-bitbucket from 0.9.46 to 0.9.47 (#2025) + * [b63d380b](https://github.com/argoproj/argo-events/commit/b63d380bc03bcce5f862fbfc0df33b7cbc7e4baa) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.27 to 1.44.32 (#2024) + * [78f2fea2](https://github.com/argoproj/argo-events/commit/78f2fea2134ffd95c2f9823f52f7e08e05f92749) chore(deps): bump google.golang.org/api from 0.81.0 to 0.82.0 (#2017) + * [b750804b](https://github.com/argoproj/argo-events/commit/b750804b2a783680d971f35d824cf90ab9ab346b) chore(deps): bump github.com/Shopify/sarama from 1.33.0 to 1.34.0 (#2013) + +### Contributors + + * Amirio + * Daniel + * Derek Wang + * Harshdeep Singh + * Ian McGraw + * Julie Vogelman + * Patrick Marx + * avasiliev + * dependabot[bot] + +## v1.7.1 (2022-06-08) + + * [a98978a3](https://github.com/argoproj/argo-events/commit/a98978a38dfc90299dc805089716ecba8374461a) Update manifests to v1.7.1 + * [b03f5c70](https://github.com/argoproj/argo-events/commit/b03f5c707326136d06d326ebf7948e6022d6c36c) chore(deps): bump cloud.google.com/go/pubsub from 1.21.1 to 1.22.2 (#2016) + * [eb669285](https://github.com/argoproj/argo-events/commit/eb66928544a8af915e89d3534eb0e82ff641790b) chore(deps): bump google.golang.org/grpc from 1.46.2 to 1.47.0 (#2014) + * [476cf56e](https://github.com/argoproj/argo-events/commit/476cf56e05ba960c50eda8d838267b8c8f8d179a) chore(deps): bump github.com/itchyny/gojq from 0.12.7 to 0.12.8 (#2015) + * [22ad8c1f](https://github.com/argoproj/argo-events/commit/22ad8c1fdb84b9fb1fc8481651ae9ea5c25f919d) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.24 to 1.44.27 (#2012) + * [dcb32d41](https://github.com/argoproj/argo-events/commit/dcb32d41782651bb6033b79d09d8b9932f2850cf) chore(deps): bump github.com/imdario/mergo from 0.3.12 to 0.3.13 (#2001) + * [e1d54ed7](https://github.com/argoproj/argo-events/commit/e1d54ed74306487d4cd93f4a653a257bd4b92263) Add support for Redis ACL auth (#2007) + * [d549502d](https://github.com/argoproj/argo-events/commit/d549502d23bba5d2e503bba551d8b268bd474ca8) chore(deps): bump actions/setup-go from 3.1.0 to 3.2.0 (#2006) + * 
[9e26ddbb](https://github.com/argoproj/argo-events/commit/9e26ddbb7b9cbbeb880063f9a5da50afd5f067d4) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.26 to 7.0.27 (#2005) + * [c1b77733](https://github.com/argoproj/argo-events/commit/c1b77733b236949dc0332b924bc3fe4c17df1384) chore(deps): bump github.com/spf13/viper from 1.11.0 to 1.12.0 (#2004) + * [d03051bb](https://github.com/argoproj/argo-events/commit/d03051bb4b7170a4c3d0dfbd4d1ed43c1cc939ab) chore(deps): bump github.com/nats-io/nats.go from 1.15.0 to 1.16.0 (#2003) + * [28531dfa](https://github.com/argoproj/argo-events/commit/28531dfa207e9411093897bd547e3323415ddaca) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.21 to 1.44.24 (#2002) + * [15cfd7f6](https://github.com/argoproj/argo-events/commit/15cfd7f6580366340807d6acb6dbf4d22308bc0e) chore(deps): bump github.com/xanzy/go-gitlab from 0.65.0 to 0.68.0 (#1995) + * [48b38900](https://github.com/argoproj/argo-events/commit/48b389000f6b5fd30ae33ba6503fcd4ee5c21479) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.19 to 1.44.21 (#1993) + * [49121eab](https://github.com/argoproj/argo-events/commit/49121eab0064a53297bce96ab82b4ecd28037e28) chore(deps): bump github.com/ktrysmt/go-bitbucket from 0.9.45 to 0.9.46 (#1994) + * [fe48c003](https://github.com/argoproj/argo-events/commit/fe48c00379acf487671e1b07b065b00695c45510) chore(deps): bump google.golang.org/api from 0.80.0 to 0.81.0 (#1992) + * [e1fab1dd](https://github.com/argoproj/argo-events/commit/e1fab1dde13b43bcfd522c0b0139de9936dc79b7) fix: jetstream statefulset resource setting (#1989) + * [656b99de](https://github.com/argoproj/argo-events/commit/656b99ded768b48f823bb788f5bc03a28f1b4ca2) feat: add support to parse QueryParameter and PostForm on webhook eve… (#1978) + * [0a9206d8](https://github.com/argoproj/argo-events/commit/0a9206d88b89227dd8df33f874a23e5896b2b7b2) chore(deps): bump google.golang.org/api from 0.79.0 to 0.80.0 (#1985) + * [248b6164](https://github.com/argoproj/argo-events/commit/248b61641ff37162ac57ffce75546dc75b525214) chore(deps): bump github.com/cloudevents/sdk-go/v2 from 2.9.0 to 2.10.0 (#1984) + * [80863521](https://github.com/argoproj/argo-events/commit/808635214ce7cc80f222b3961043b97ea56c4a7b) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.14 to 1.44.19 (#1983) + * [b08d6379](https://github.com/argoproj/argo-events/commit/b08d6379e18fd9a887e225918811f65220cebd1d) fix: make access token optional (#1976) + * [52a26202](https://github.com/argoproj/argo-events/commit/52a2620242ee54c80e80fd4de702ccba7887454d) fix: Limit github hooks manager daemon lifetime to 10 min (#1930) + * [11c547fd](https://github.com/argoproj/argo-events/commit/11c547fdfb1401a65835e73163cac737e53148ba) chore(deps): bump google.golang.org/grpc from 1.46.0 to 1.46.2 (#1974) + * [6fa9a133](https://github.com/argoproj/argo-events/commit/6fa9a13367b77cc4064d48155e1a9e6a8ce497e7) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.9 to 1.44.14 (#1973) + * [69a4b4ae](https://github.com/argoproj/argo-events/commit/69a4b4ae2e8f673a6e018101ea1104fe80e55f80) chore(deps): bump actions/setup-go from 3.0.0 to 3.1.0 (#1975) + * [a66da058](https://github.com/argoproj/argo-events/commit/a66da05877cbc41a51d1f4a4cb6d4455aedeb47b) chore(deps): bump github.com/prometheus/client_golang (#1971) + * [1e8e81ed](https://github.com/argoproj/argo-events/commit/1e8e81edb49354087968429e52244d89042f8b1d) chore(deps): bump github.com/ktrysmt/go-bitbucket from 0.9.44 to 0.9.45 (#1969) + * 
[2bbe22c4](https://github.com/argoproj/argo-events/commit/2bbe22c4a5bbf2b22996bb55df9fedbaddb29215) chore(deps): bump google.golang.org/api from 0.78.0 to 0.79.0 (#1970) + * [1c0cfff4](https://github.com/argoproj/argo-events/commit/1c0cfff48d2d2dbf4297f3471dada3767ed5efa8) chore(deps): bump github.com/xanzy/go-gitlab from 0.64.0 to 0.65.0 (#1968) + * [78876451](https://github.com/argoproj/argo-events/commit/788764514be21a646cf08ec64708384a41e86eaa) chore(deps): bump github.com/Shopify/sarama from 1.32.0 to 1.33.0 (#1967) + * [d0f66dbc](https://github.com/argoproj/argo-events/commit/d0f66dbce78bc31923ca057b20fc722aa24ca961) fix: git artifactory arbitrary file read issue (#1965) + * [e80ab9f9](https://github.com/argoproj/argo-events/commit/e80ab9f9556f7f97346fb393e312f8a689ca21f8) feat: add support of submit from existing resource (#1908) (#1941) + * [3861f8a7](https://github.com/argoproj/argo-events/commit/3861f8a72c50284b8cc35975b64418dad6717f0e) fix: use crypto/rand instead of math/rand (#1959) + * [9f243f67](https://github.com/argoproj/argo-events/commit/9f243f67cbe6158c4de38cf368e2515ca9b1c0e8) fix: add nil check on sensor spec validation (#1961) + * [f4b6b04b](https://github.com/argoproj/argo-events/commit/f4b6b04b681bff5802ac55b15b093c7f9585fa90) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.24 to 7.0.26 (#1939) + * [4cb6cad8](https://github.com/argoproj/argo-events/commit/4cb6cad85824bd72cda14e392e6382dc9cdb9aaa) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.8 to 1.44.9 (#1938) + * [6fe18099](https://github.com/argoproj/argo-events/commit/6fe18099bff207e768774d5e69d782115e7c0553) feat: Expand e2e tests (#1859) + * [6284920c](https://github.com/argoproj/argo-events/commit/6284920cb864a313c212d2d50bc3bb7918737a72) chore(deps): bump docker/setup-buildx-action from 1 to 2 (#1935) + * [480fd9f3](https://github.com/argoproj/argo-events/commit/480fd9f334b33eb1c5c2a0f1581caa997d1bc465) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.7 to 1.44.8 (#1937) + * [6c6a2102](https://github.com/argoproj/argo-events/commit/6c6a2102d4dd832b75d15480b8e201049f378f74) chore(deps): bump docker/login-action from 1 to 2 (#1936) + * [ed5eb358](https://github.com/argoproj/argo-events/commit/ed5eb358e649043684aaa416b4d7f0d5cdb97fc5) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.6 to 1.44.7 (#1933) + * [a4d23d23](https://github.com/argoproj/argo-events/commit/a4d23d23b1ab943cc0e90dedca2dbad20249c146) chore(deps): bump github.com/nats-io/nats.go from 1.14.0 to 1.15.0 (#1932) + * [81025a67](https://github.com/argoproj/argo-events/commit/81025a67af8317c2ffe9922055d0c08385a3e775) chore(deps): bump cloud.google.com/go/pubsub from 1.21.0 to 1.21.1 (#1931) + +### Contributors + + * Daniel + * Derek Wang + * Ivan Babiankou + * Julie Vogelman + * Omar Elbanby + * Tom Elliff-O'Shea + * dependabot[bot] + * shirou + +## v1.7.0 (2022-05-04) + + * [6b10ce4d](https://github.com/argoproj/argo-events/commit/6b10ce4d463c312085fb33f6be12cfd137ea19bc) Update manifests to v1.7.0 + * [14a6d1c2](https://github.com/argoproj/argo-events/commit/14a6d1c27ef85d0b80bd6e9087fbfa1929b715a2) chore(deps): bump google.golang.org/api from 0.77.0 to 0.78.0 (#1924) + * [2736104d](https://github.com/argoproj/argo-events/commit/2736104d08c9f96dd76afe37e8a737ed0d6368f6) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.5 to 1.44.6 (#1923) + * [a07013e4](https://github.com/argoproj/argo-events/commit/a07013e41de1ee15c1232feb9e77cc19420aaa52) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.4 to 1.44.5 (#1922) + 
* [f3cf34e5](https://github.com/argoproj/argo-events/commit/f3cf34e50aa01a956b806b82a8ba5104a3068d17) fix: Enforce webhook secret in BitbucketServer event source (#1917) + * [bbe135f7](https://github.com/argoproj/argo-events/commit/bbe135f730402f05a949045dc12fa066e1a20ca2) chore(deps): bump google.golang.org/api from 0.76.0 to 0.77.0 (#1919) + * [cfe47a04](https://github.com/argoproj/argo-events/commit/cfe47a0436adaa14d017fdcce2a8c08b1671c4f3) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.2 to 1.44.4 (#1918) + * [3781f13c](https://github.com/argoproj/argo-events/commit/3781f13c2d2b410732bc4d36777508e54d033d14) added script filter (#1894) + * [56f202ac](https://github.com/argoproj/argo-events/commit/56f202ac2bcbeb91f77e5a0b90df91cb698c17f2) fix: Bitbucketserver webhook secret optional (#1902) + * [881fc7fb](https://github.com/argoproj/argo-events/commit/881fc7fb91d85566abc7a48c1cc69befd0545918) chore(deps): bump github.com/Azure/azure-event-hubs-go/v3 (#1912) + * [90415435](https://github.com/argoproj/argo-events/commit/90415435a921de3d8f8f12d00a1a83775e1cb44e) chore(deps): bump github.com/argoproj/pkg from 0.13.0 to 0.13.1 (#1911) + * [d754f181](https://github.com/argoproj/argo-events/commit/d754f18158e5cf85a4b6dad93bf46004a84bd4f6) chore(deps): bump google.golang.org/api from 0.75.0 to 0.76.0 (#1910) + * [191730cc](https://github.com/argoproj/argo-events/commit/191730cca0c21f67a485ac75021164cf591b6c35) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.1 to 1.44.2 (#1909) + * [e324e0e0](https://github.com/argoproj/argo-events/commit/e324e0e0fd244996ff2a3422e1dff55837e7d7d0) chore(deps): bump github.com/fsnotify/fsnotify from 1.5.3 to 1.5.4 (#1907) + * [a417a8be](https://github.com/argoproj/argo-events/commit/a417a8be4e6c0b47c2aa5ab3c11939317ac6c895) chore(deps): bump github.com/aws/aws-sdk-go from 1.44.0 to 1.44.1 (#1906) + * [a3a7fc6f](https://github.com/argoproj/argo-events/commit/a3a7fc6fc686d19fe9c36c1932254582689e3f64) chore(deps): bump github.com/google/go-cmp from 0.5.7 to 0.5.8 (#1905) + * [e4d6338a](https://github.com/argoproj/argo-events/commit/e4d6338a6acb065e36372903e08893ee88366f00) chore(deps): bump cloud.google.com/go/pubsub from 1.20.0 to 1.21.0 (#1904) + * [dfcb8056](https://github.com/argoproj/argo-events/commit/dfcb8056d0c919b7f936eb85d824ba8666eedebb) chore(deps): bump github.com/argoproj/pkg from 0.12.0 to 0.13.0 (#1903) + * [dcd13f2c](https://github.com/argoproj/argo-events/commit/dcd13f2cf9a0817a6f2650ac6741af0f27eac116) chore(deps): bump github.com/aws/aws-sdk-go from 1.43.45 to 1.44.0 (#1900) + * [2df44805](https://github.com/argoproj/argo-events/commit/2df44805cd2ac7fbb2c13973a76764f3184fdace) chore(deps): bump github.com/aws/aws-sdk-go from 1.43.44 to 1.43.45 (#1898) + * [1a27bcb2](https://github.com/argoproj/argo-events/commit/1a27bcb2e3c63510870ca9c674a0240a0bc54b65) chore(deps): bump github.com/xanzy/go-gitlab from 0.63.0 to 0.64.0 (#1897) + * [650d83f8](https://github.com/argoproj/argo-events/commit/650d83f84bd407b5743b2f0137c51434c61a553c) chore(deps): bump github.com/slack-go/slack from 0.10.2 to 0.10.3 (#1896) + * [0950594c](https://github.com/argoproj/argo-events/commit/0950594cf5daeb47c1612071e412a5c5972916b2) chore(deps): bump google.golang.org/grpc from 1.45.0 to 1.46.0 (#1895) + * [f6924a09](https://github.com/argoproj/argo-events/commit/f6924a0927f4a13727c1aff68d939cc3047c35d7) Fix a concurrency bug in stress test, plus clean up of e2e tests (#1882) + +### Contributors + + * Daniel + * Derek Wang + * Julie Vogelman + * Vaibhav + * 
dependabot[bot] + +## v1.7.0-rc1 (2022-04-23) + + * [5b2905b0](https://github.com/argoproj/argo-events/commit/5b2905b04688ccd43cffb15329e5a9a106748eea) Update manifests to v1.7.0-rc1 + * [8a978529](https://github.com/argoproj/argo-events/commit/8a97852955747a6212e9cb4543399e15f884002d) feat: Controller Leader Election (#1883) + * [66cb7c4f](https://github.com/argoproj/argo-events/commit/66cb7c4f7a34eb5059740070c7b970d4f0a3e60f) chore(deps): bump github.com/aws/aws-sdk-go from 1.43.43 to 1.43.44 (#1886) + * [339cccd3](https://github.com/argoproj/argo-events/commit/339cccd3749661acf4f6efeaf0f2e37e8b07b352) chore(deps): bump cloud.google.com/go/compute from 1.6.0 to 1.6.1 (#1884) + * [df6d4d7c](https://github.com/argoproj/argo-events/commit/df6d4d7c72ebae7d42a58c9d710b146dcd4b9160) chore(deps): bump github.com/fsnotify/fsnotify from 1.5.2 to 1.5.3 (#1885) + * [689110fa](https://github.com/argoproj/argo-events/commit/689110faddbcf007b48217280dafc6f71cb02677) chore(deps): bump github.com/argoproj/pkg from 0.11.0 to 0.12.0 (#1878) + * [f58ee413](https://github.com/argoproj/argo-events/commit/f58ee413399b9011815db19f12d27ea292663ff3) fix minio put/delete triggers (#1873) + * [45e40d89](https://github.com/argoproj/argo-events/commit/45e40d8996514e409ad659a3ba969f5d7a496cba) feat: Jetstream allows multiple dependencies to refer to the same Event (#1870) + * [13341ef6](https://github.com/argoproj/argo-events/commit/13341ef6c6f4bf9b9ecf3d184cb14fb06d81d366) feat: Incorporating Jetstream bus option into stress test (#1862) + * [89941a76](https://github.com/argoproj/argo-events/commit/89941a761ae0b73690d85dae94152f9029cf3d32) chore(deps): bump google.golang.org/api from 0.74.0 to 0.75.0 (#1879) + * [bc300410](https://github.com/argoproj/argo-events/commit/bc3004107b9868f3672d8a49f5a60db17e333183) chore(deps): bump github.com/fsnotify/fsnotify from 1.5.1 to 1.5.2 (#1877) + * [66726529](https://github.com/argoproj/argo-events/commit/6672652901d1157d4d9e633f2cddd4f119483609) chore(deps): bump github.com/mitchellh/mapstructure from 1.4.3 to 1.5.0 (#1875) + * [948063eb](https://github.com/argoproj/argo-events/commit/948063ebb003f94d5b7adbd5ecf0e67cf32075b1) chore(deps): bump github.com/aws/aws-sdk-go from 1.43.42 to 1.43.43 (#1876) + * [41135856](https://github.com/argoproj/argo-events/commit/41135856f395caf4bc049697766debefa6d2e8b3) chore(deps): bump github.com/aws/aws-sdk-go from 1.43.40 to 1.43.42 (#1864) + * [a3172610](https://github.com/argoproj/argo-events/commit/a3172610e1cb1a763eefbdb7051fc17a8784da8d) chore(deps): bump github.com/tidwall/gjson from 1.14.0 to 1.14.1 (#1868) + * [3d59207b](https://github.com/argoproj/argo-events/commit/3d59207b38b99e392ad916307a4e57cc70ec78b4) feat: User should not be able to change Jetstream StreamConfig (#1860) + * [2b9d46ec](https://github.com/argoproj/argo-events/commit/2b9d46ece4fce519c0ab4dc4510a483f3edc2cf3) fix: example for trigger template parameterization (#1858) + * [c80e8080](https://github.com/argoproj/argo-events/commit/c80e8080e1804db5cf811184a1c35578d8891924) fix: address timeout issue on CI server for 'make lint' (#1869) + * [3539edf3](https://github.com/argoproj/argo-events/commit/3539edf3cf6c96400c7e0915bc066cf6a1d39d17) chore(deps): bump github.com/ktrysmt/go-bitbucket from 0.9.41 to 0.9.44 (#1865) + * [5505e95a](https://github.com/argoproj/argo-events/commit/5505e95a43236450854746d3c52e5cadd9429ba2) chore(deps): bump dependabot/fetch-metadata from 1.3.0 to 1.3.1 (#1866) + * 
[4f8b17e8](https://github.com/argoproj/argo-events/commit/4f8b17e85a1c2bacca8fd0babc176e0cf006d0c5) fix: adding retry logic for Stream creation in the case that another client is trying to create the Stream at the same time (#1854) + * [f43468f4](https://github.com/argoproj/argo-events/commit/f43468f45124786c6cc9e82a65565a096ed79724) fix: broken gh-pages build (#1853) + * [e78d5403](https://github.com/argoproj/argo-events/commit/e78d540346718f4867f19235ea4e88270a41e4de) chore(deps): bump github.com/aws/aws-sdk-go from 1.43.39 to 1.43.40 (#1851) + * [01825a5c](https://github.com/argoproj/argo-events/commit/01825a5c6a8a0fdb90ae9a6534d39afc706046f5) chore(deps): bump cloud.google.com/go/compute from 1.5.0 to 1.6.0 (#1852) + * [300d2965](https://github.com/argoproj/argo-events/commit/300d296554a3f4491a53d6b25a19375fec64d8ce) Fixes related to making Sensor (and Eventsource) more robust if one or more Jetstream Pods goes down (#1845) + * [f7d21e83](https://github.com/argoproj/argo-events/commit/f7d21e834f600245f3c4a72c17896d803899f601) chore(deps): bump actions/setup-python from 3.1.1 to 3.1.2 (#1842) + * [615936f5](https://github.com/argoproj/argo-events/commit/615936f581b9c1cc3286316447c18dfefe645997) chore(deps): bump github.com/aws/aws-sdk-go from 1.43.38 to 1.43.39 (#1848) + * [66866c9b](https://github.com/argoproj/argo-events/commit/66866c9b535f5998fbb0a8e1073e3a3b426fa96b) chore(deps): bump github.com/spf13/viper from 1.10.1 to 1.11.0 (#1847) + * [df82d1a2](https://github.com/argoproj/argo-events/commit/df82d1a2b57f9fc4c1eab542d78d7334fcb49f5a) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.23 to 7.0.24 (#1846) + * [71417a1b](https://github.com/argoproj/argo-events/commit/71417a1b98b2960da2b218031e06606ca43819fe) chore(deps): bump github.com/aws/aws-sdk-go from 1.43.37 to 1.43.38 (#1844) + * [f76fb1e9](https://github.com/argoproj/argo-events/commit/f76fb1e94531dade5262f9c517da8f12dc1b2b98) chore(deps): bump github.com/aws/aws-sdk-go from 1.43.36 to 1.43.37 (#1843) + * [bc44b359](https://github.com/argoproj/argo-events/commit/bc44b35909f625d33fd8168f254d9dc0ee7ac5f0) chore(deps): bump cloud.google.com/go/pubsub from 1.19.0 to 1.20.0 (#1841) + * [4d47cdf1](https://github.com/argoproj/argo-events/commit/4d47cdf14a6046b5a9edbb9276d7a74e62ca32ad) chore(deps): bump github.com/xanzy/go-gitlab from 0.62.0 to 0.63.0 (#1840) + * [b1041d6f](https://github.com/argoproj/argo-events/commit/b1041d6f57a8ce75e795aba53ade9d2119de58a5) fix: example for minio (#1839) + * [5f894191](https://github.com/argoproj/argo-events/commit/5f89419179359999ca5d9a79b7db259f7992c487) chore(deps): bump golangci/golangci-lint-action from 2 to 3 (#1834) + * [02a6f07b](https://github.com/argoproj/argo-events/commit/02a6f07b5cd00150a5f64179a631dcaabe86efac) chore(deps): bump actions/download-artifact from 2 to 3 (#1836) + * [4b42a022](https://github.com/argoproj/argo-events/commit/4b42a02214c68de9770dbe9355de9d657ad433a5) chore(deps): bump actions/upload-artifact from 2 to 3 (#1835) + * [4433234c](https://github.com/argoproj/argo-events/commit/4433234cfc9bf809a807111920c01449e5731a29) chore(deps): bump github.com/aws/aws-sdk-go from 1.43.33 to 1.43.36 (#1832) + * [5863daef](https://github.com/argoproj/argo-events/commit/5863daef6166af233952d701460f039a920fee9b) chore(deps): bump github.com/ktrysmt/go-bitbucket from 0.9.40 to 0.9.41 (#1833) + * [ed5ff514](https://github.com/argoproj/argo-events/commit/ed5ff514855c10de11541bca41b1e202668f662c) chore(deps): bump actions/setup-python from 3.1.0 to 3.1.1 (#1825) + * 
[29616ec6](https://github.com/argoproj/argo-events/commit/29616ec60b5da7ed404f283192004ce75439a394) feat: TLS for Jetstream (#1815) + * [f869f6b9](https://github.com/argoproj/argo-events/commit/f869f6b98f9612657f857db2063c266af51bec82) chore(deps): bump github.com/aws/aws-sdk-go from 1.43.32 to 1.43.33 (#1819) + * [8ae91841](https://github.com/argoproj/argo-events/commit/8ae91841a93957a5ad50063bd71216d4811337c7) chore(deps): bump github.com/xanzy/go-gitlab from 0.61.0 to 0.62.0 (#1818) + * [9332679c](https://github.com/argoproj/argo-events/commit/9332679c2c6aaef60e7cba1f4a8b9749510b9b0d) feat: use user/pass for JetStream authentication (#1809) + * [86ac587c](https://github.com/argoproj/argo-events/commit/86ac587cf5e9016408f241bedc6fc6074822e517) chore(deps): bump github.com/aws/aws-sdk-go from 1.43.30 to 1.43.32 (#1813) + * [a4869508](https://github.com/argoproj/argo-events/commit/a4869508754e1928f3b9cf08f3622669cb17ef28) chore(deps): bump github.com/xanzy/go-gitlab from 0.60.0 to 0.61.0 (#1810) + * [3ec3713c](https://github.com/argoproj/argo-events/commit/3ec3713ce9f361fb6d36e06aa0d8bdb19e84cc4f) feat: Jetstream - Get closer to "exactly once" by maintaining a cache of message IDs in the Trigger processing code (#1802) + * [0c882f84](https://github.com/argoproj/argo-events/commit/0c882f84e59327c06e3458572952ef5e1558cd7f) chore(deps): bump actions/setup-python from 3.0.0 to 3.1.0 (#1805) + * [d79152f5](https://github.com/argoproj/argo-events/commit/d79152f525e881d2df5d6ed883943a1a49449922) chore(deps): bump github.com/cloudevents/sdk-go/v2 from 2.8.0 to 2.9.0 (#1803) + * [58330b95](https://github.com/argoproj/argo-events/commit/58330b95e10633ede6b1cb0bc3d7094de2100a48) chore(deps): bump github.com/aws/aws-sdk-go from 1.43.29 to 1.43.30 (#1804) + * [657d33ca](https://github.com/argoproj/argo-events/commit/657d33ca8dd34d8e31b649a8ef1c707137280043) feat: combine controllers to one deployment [Release Note] (#1753) + * [6ac8d049](https://github.com/argoproj/argo-events/commit/6ac8d04982108a71431e012750e28260e6c2e210) feat: Jetstream EventBus (#1783) + * [3e0573d8](https://github.com/argoproj/argo-events/commit/3e0573d80db1dc496ad25946192c91230c0b0843) chore(deps): bump github.com/aws/aws-sdk-go from 1.43.28 to 1.43.29 (#1790) + * [8e906421](https://github.com/argoproj/argo-events/commit/8e906421aa7e3cf298f05262ee6b23d243066c5e) chore(deps): bump sigs.k8s.io/controller-runtime from 0.11.1 to 0.11.2 (#1789) + * [7f8e8602](https://github.com/argoproj/argo-events/commit/7f8e8602d9e79dd87b78f97705d2e487a6d2e319) chore(deps): bump google.golang.org/api from 0.73.0 to 0.74.0 (#1788) + * [e9e7d828](https://github.com/argoproj/argo-events/commit/e9e7d828fbe862207957e052301d5682cfebf283) chore(deps): bump github.com/aws/aws-sdk-go from 1.43.27 to 1.43.28 (#1785) + * [eef37231](https://github.com/argoproj/argo-events/commit/eef37231d19dcf83f9ddefebe2a3a8d2f55aae62) feat: more jetstream configurations (#1771) + * [99ff0d29](https://github.com/argoproj/argo-events/commit/99ff0d29f5aedd69e36d4d894e7db4a9668f0dac) chore(deps): bump github.com/aws/aws-sdk-go from 1.43.26 to 1.43.27 (#1777) + * [34062705](https://github.com/argoproj/argo-events/commit/340627059538af49f095e497fdef1b9e888f393d) fix: redis event source to be able to parameterize (#1754) + * [d058a935](https://github.com/argoproj/argo-events/commit/d058a93588a05e7cfdbee967a76409ab8dea10e6) chore(deps): bump dependabot/fetch-metadata from 1.1.1 to 1.3.0 (#1773) + * 
[bd1aad75](https://github.com/argoproj/argo-events/commit/bd1aad75320e5a7b35c1cc92ae3ec6f3c99658a2) chore(deps): bump github.com/aws/aws-sdk-go from 1.43.25 to 1.43.26 (#1772) + * [5f88f0e9](https://github.com/argoproj/argo-events/commit/5f88f0e922bbccf398d666483550b5356aacfa86) chore(deps): bump github.com/aws/aws-sdk-go from 1.43.24 to 1.43.25 (#1766) + * [2ad4fd73](https://github.com/argoproj/argo-events/commit/2ad4fd73fecf05a0c50483235c82f1c3847f0ee4) chore(deps): bump github.com/aws/aws-sdk-go from 1.43.23 to 1.43.24 (#1764) + * [9e9e90b2](https://github.com/argoproj/argo-events/commit/9e9e90b26033ca984f45fd15049fe175a02ce635) fix(leaderelection): never give up connection retry to nats server (#1748) + * [c633ab10](https://github.com/argoproj/argo-events/commit/c633ab1043c54b795cfd8e7e03731747e7cffeaa) chore(deps): bump github.com/aws/aws-sdk-go from 1.43.22 to 1.43.23 (#1761) + * [c1fe7758](https://github.com/argoproj/argo-events/commit/c1fe7758fd6b7504e73bf69d626d7a51c3a5fb79) chore(deps): bump github.com/go-redis/redis/v8 from 8.11.4 to 8.11.5 (#1762) + * [4b1e2364](https://github.com/argoproj/argo-events/commit/4b1e2364197747ab9e5e2535ef7281a5ff26bdbb) chore(deps): bump peter-evans/create-pull-request from 3 to 4 (#1763) + * [93cd06d6](https://github.com/argoproj/argo-events/commit/93cd06d67d3a80c009d28ca426dfa74936edba4b) feat: Redis streams as event source (#1744) + * [c6e2745a](https://github.com/argoproj/argo-events/commit/c6e2745ae35f1b5d7f19fd6eb824c05b548900c4) chore(deps): bump github.com/aws/aws-sdk-go from 1.43.21 to 1.43.22 (#1759) + * [626cf09c](https://github.com/argoproj/argo-events/commit/626cf09c3394de9b2e5d6aa14a0caa1379031c27) chore(deps): bump github.com/aws/aws-sdk-go from 1.43.20 to 1.43.21 (#1757) + * [a2bcf947](https://github.com/argoproj/argo-events/commit/a2bcf94784a927345d3f2fa4a333a2cfa073e820) chore(deps): bump github.com/xanzy/go-gitlab from 0.59.0 to 0.60.0 (#1756) + * [72eac052](https://github.com/argoproj/argo-events/commit/72eac052a73cfa3715697e6deab0aec02e202002) chore(deps): bump actions/cache from 2.1.7 to 3 (#1758) + * [6ba8f7ac](https://github.com/argoproj/argo-events/commit/6ba8f7aca9b947ff0289757248f28725c01ae30e) chore(deps): bump google.golang.org/api from 0.72.0 to 0.73.0 (#1750) + * [811229dd](https://github.com/argoproj/argo-events/commit/811229dd93a8f9f06b3a9bca1d3fe9ed40b7eab4) chore(deps): bump github.com/ktrysmt/go-bitbucket from 0.9.39 to 0.9.40 (#1745) + * [b1c39c78](https://github.com/argoproj/argo-events/commit/b1c39c78091fd19d02fc8afd175a11f897c91361) chore(deps): bump github.com/aws/aws-sdk-go from 1.43.17 to 1.43.20 (#1746) + * [4e851539](https://github.com/argoproj/argo-events/commit/4e85153951fddbc81a3296250fafe8ecce4e7695) chore(deps): bump github.com/stretchr/testify from 1.7.0 to 1.7.1 (#1741) + * [8bde891e](https://github.com/argoproj/argo-events/commit/8bde891e9bcd2c69e321427c7a4d58a20a707e3b) chore(deps): bump github.com/apache/pulsar-client-go from 0.1.1 to 0.8.1 (#1740) + * [4154740b](https://github.com/argoproj/argo-events/commit/4154740bef7ceceeb70307593f814780c3b7db6d) chore(deps): bump github.com/ktrysmt/go-bitbucket from 0.9.36 to 0.9.39 (#1739) + * [035523ce](https://github.com/argoproj/argo-events/commit/035523ce350ee464394beb1091d3987d5160219e) chore(deps): bump google.golang.org/api from 0.71.0 to 0.72.0 (#1738) + * [77d4a3aa](https://github.com/argoproj/argo-events/commit/77d4a3aa60cf34e0aa25a6c64b3ece123ba27703) chore(deps): bump github.com/spf13/cobra from 1.3.0 to 1.4.0 (#1734) + * 
[ced919ca](https://github.com/argoproj/argo-events/commit/ced919caebc9a999d350f76298d71ae5e8be7dd1) chore(deps): bump github.com/aws/aws-sdk-go from 1.43.16 to 1.43.17 (#1733) + * [9a93b030](https://github.com/argoproj/argo-events/commit/9a93b0307444ea3c61b0c63abcb3653bc5e03ca9) chore(deps): bump github.com/aws/aws-sdk-go from 1.43.15 to 1.43.16 (#1725) + * [defc7f18](https://github.com/argoproj/argo-events/commit/defc7f1878b9ab772278c994c24768da92a5cb3c) chore(deps): bump google.golang.org/grpc from 1.44.0 to 1.45.0 (#1724) + * [dacd801d](https://github.com/argoproj/argo-events/commit/dacd801d240901ca4f2a7982db4c26a2a2d60e08) chore(deps): bump github.com/xanzy/go-gitlab from 0.58.0 to 0.59.0 (#1720) + * [d05853c5](https://github.com/argoproj/argo-events/commit/d05853c513ce42b4d18eb6fef7f5c4d05ec277db) chore(deps): bump github.com/aws/aws-sdk-go from 1.43.14 to 1.43.15 (#1721) + * [f4de9e57](https://github.com/argoproj/argo-events/commit/f4de9e57bccbe37618d715c450d6d8c40820ae6e) chore(deps): bump google.golang.org/api from 0.70.0 to 0.71.0 (#1718) + * [93a221a3](https://github.com/argoproj/argo-events/commit/93a221a3d4313014c836fa98e2f47b903f031221) chore(deps): bump github.com/aws/aws-sdk-go from 1.43.13 to 1.43.14 (#1717) + * [c2b2d5ec](https://github.com/argoproj/argo-events/commit/c2b2d5eccfdcf6363e71630fadea320f4e7406e4) chore(deps): bump cloud.google.com/go/pubsub from 1.18.0 to 1.19.0 (#1715) + * [a818b7cb](https://github.com/argoproj/argo-events/commit/a818b7cb4b204a800cfa51c29a3d542495e39be6) chore(deps): bump github.com/aws/aws-sdk-go from 1.43.12 to 1.43.13 (#1714) + * [475baacb](https://github.com/argoproj/argo-events/commit/475baacb9581d644aef504be182f4e52f59f211c) feat: jetstream eventbus controller implementation (#1705) + * [49709878](https://github.com/argoproj/argo-events/commit/4970987849a02a17b755f00608945faceeb0937e) chore(deps): bump github.com/xanzy/go-gitlab from 0.57.0 to 0.58.0 (#1711) + * [8f9bbb15](https://github.com/argoproj/argo-events/commit/8f9bbb156ef876f31c1a2f9f288d562466f4766a) chore(deps): bump github.com/aws/aws-sdk-go from 1.43.10 to 1.43.12 (#1709) + * [26d79085](https://github.com/argoproj/argo-events/commit/26d7908546423bcc199b28a946e656f3dd4657da) chore(deps): bump github.com/xanzy/go-gitlab from 0.55.1 to 0.57.0 (#1708) + * [56ccdd7d](https://github.com/argoproj/argo-events/commit/56ccdd7d6657d36b78f3107790852f0b88b7d03c) chore(deps): bump github.com/xdg-go/scram from 1.1.0 to 1.1.1 (#1707) + * [fc2c73bb](https://github.com/argoproj/argo-events/commit/fc2c73bb5e19c841b8c33f98c46ea38f297d298f) fix: Quick fix for Issue #1694 (#1701) + * [b18b5a5f](https://github.com/argoproj/argo-events/commit/b18b5a5fd08300ac59d9d7f3fadde15c70f2fefe) chore(deps): bump github.com/aws/aws-sdk-go from 1.43.9 to 1.43.10 (#1704) + * [e035db3b](https://github.com/argoproj/argo-events/commit/e035db3bee76a3f3067752a1e64fc5ed0586e0ea) chore(deps): bump cloud.google.com/go/pubsub from 1.3.1 to 1.18.0 (#1703) + * [9cfec6bb](https://github.com/argoproj/argo-events/commit/9cfec6bbf4f099dcd9021fd64638aecbdbcad7ec) chore(deps): bump actions/stale from 4.1.0 to 5 (#1700) + * [e9ab1806](https://github.com/argoproj/argo-events/commit/e9ab180636de8ab856d0c16c1da1261cf2e3c82e) chore(deps): bump actions/checkout from 2 to 3 (#1699) + * [45da391a](https://github.com/argoproj/argo-events/commit/45da391a5f531a731679baaba7d911d1b2d2cf7b) chore(deps): bump github.com/aws/aws-sdk-go from 1.43.5 to 1.43.9 (#1698) + * 
[80ed9b14](https://github.com/argoproj/argo-events/commit/80ed9b1477e507230a5d0b171f7e99a79eec74cc) Support configuring argo CLI args when using argo workflow trigger. (#1655) + * [44fe06c7](https://github.com/argoproj/argo-events/commit/44fe06c7c9f363893a6229615b16c7e6a072d230) Yubo added to users (#1693) + * [35f643a8](https://github.com/argoproj/argo-events/commit/35f643a88a1e57804998c862ffcb036dd53b6154) fix: changing lastResetTime to a time.Time from int64 seconds so time comparison can occur at a finer granularity (#1695) + * [b9bbfd15](https://github.com/argoproj/argo-events/commit/b9bbfd15de09200859d39a8c838ff7e934e38a17) chore(deps): bump github.com/itchyny/gojq from 0.12.6 to 0.12.7 (#1692) + * [2b369352](https://github.com/argoproj/argo-events/commit/2b36935249223dcb8886bfb381976668fdf696b0) chore(deps): bump github.com/Shopify/sarama from 1.31.1 to 1.32.0 (#1684) + * [75d4cbf3](https://github.com/argoproj/argo-events/commit/75d4cbf30bc23e9d95b4d5af67b2ab8b5ccba869) chore(deps): bump github.com/ktrysmt/go-bitbucket from 0.9.32 to 0.9.36 (#1683) + * [a2562c08](https://github.com/argoproj/argo-events/commit/a2562c08c5729d8c795c4ca2b66d55941e212084) chore(deps): bump actions/setup-python from 2.3.2 to 3.0.0 (#1685) + * [597ddb86](https://github.com/argoproj/argo-events/commit/597ddb86ac95d6087c5a5f3364c73be3f727cb74) chore(deps): bump actions/setup-go from 2.2.0 to 3.0.0 (#1686) + * [35ced2ba](https://github.com/argoproj/argo-events/commit/35ced2ba002b6079490eb78a87d0faf9f042d20b) chore(deps): bump cloud.google.com/go/compute from 1.3.0 to 1.5.0 (#1674) + * [babfcd16](https://github.com/argoproj/argo-events/commit/babfcd163d7f295ea422c192aa8b2413d5ee43d5) chore(deps): bump github.com/minio/minio-go/v7 from 7.0.15 to 7.0.23 (#1673) + * [881eb33c](https://github.com/argoproj/argo-events/commit/881eb33c3d614925b0745f116a87bd725a3340a0) fix: read kafka error channel. Fixes #1656 (#1664) + * [0db531cc](https://github.com/argoproj/argo-events/commit/0db531ccb42061256cf74a9d21b8378b5d940af1) fix: remove string replacement on expressions for event filtering (#1670) + * [d7190e8e](https://github.com/argoproj/argo-events/commit/d7190e8e36e6e4adbfc0d9bbec71f416eacd9757) chore(deps): bump github.com/tidwall/gjson from 1.13.0 to 1.14.0 (#1668) + * [b5a63bb1](https://github.com/argoproj/argo-events/commit/b5a63bb14ef195b39e2813f05c531bae1ae8b3ab) chore(deps): bump github.com/aws/aws-sdk-go from 1.42.50 to 1.43.5 (#1669) + * [ddda8800](https://github.com/argoproj/argo-events/commit/ddda8800f9f81f200dd93e9aedf732a2b4de8fb1) chore(deps): bump google.golang.org/api from 0.69.0 to 0.70.0 (#1666) + * [ee282526](https://github.com/argoproj/argo-events/commit/ee282526144e6e87c8358a1d9db0fe1cf20954cc) chore(deps): bump github.com/bradleyfalzon/ghinstallation/v2 (#1665) + * [0e5875a1](https://github.com/argoproj/argo-events/commit/0e5875a1c4849e7a7821c54265400641f126287b) chore(deps): bump go.uber.org/zap from 1.20.0 to 1.21.0 (#1661) + * [c236b7d5](https://github.com/argoproj/argo-events/commit/c236b7d55dd9708bb8b02dc6114a66201ad0f4fc) chore(deps): bump github.com/antonmedv/expr from 1.8.8 to 1.9.0 (#1660) + * [ad1b19ea](https://github.com/argoproj/argo-events/commit/ad1b19ea4958de3e4a019be6f3915bc03dd9dd84) fix: Update docs for Openshift support for the Validating Admission Webhook (#1650) + * [64c7e177](https://github.com/argoproj/argo-events/commit/64c7e1778430394f8b3bffb264ad64d209037802) feat: Adds argo stop as a supported operation for argoWorkflow triggers. 
(#1648) + * [6faed05d](https://github.com/argoproj/argo-events/commit/6faed05d0f94adb2c0f9ac2054edbdae1fc72f6e) chore(deps): bump github.com/Azure/azure-event-hubs-go/v3 (#1646) + * [bfcc8a90](https://github.com/argoproj/argo-events/commit/bfcc8a909823b4a5d42eeb2ce7a82fd155a1177f) chore(deps): bump github.com/slack-go/slack from 0.10.1 to 0.10.2 (#1645) + * [8ce5cadf](https://github.com/argoproj/argo-events/commit/8ce5cadf3ce5d5961ce3b1d8f4f72c8c8157a6eb) chore(deps): bump google.golang.org/api from 0.68.0 to 0.69.0 (#1644) + * [4576c354](https://github.com/argoproj/argo-events/commit/4576c35429f32b0ea16b94db626318c76fc1405d) chore(deps): bump github.com/xanzy/go-gitlab from 0.54.4 to 0.55.1 (#1642) + * [c0d22950](https://github.com/argoproj/argo-events/commit/c0d22950b0778b830e12918e5691e6a3a2fd9fd2) fix: Ignore mv event buses error on make codegen. Fixes #1638 (#1639) + * [176caabe](https://github.com/argoproj/argo-events/commit/176caabe66dc5e6119dc3cae9f462fe77dc1cf85) fix: adds SigningCertURL validation for SNS messages (#1637) + * [00e2ae80](https://github.com/argoproj/argo-events/commit/00e2ae801addcd362a22613a745ae424932efa40) Add SQS Custom endpoint support (#1632) + * [4721cf3d](https://github.com/argoproj/argo-events/commit/4721cf3d01d7ea06d1c93cb0b7a986698d977500) chore(deps): bump github.com/cloudevents/sdk-go/v2 from 2.1.0 to 2.8.0 (#1635) + * [ee37d13f](https://github.com/argoproj/argo-events/commit/ee37d13f17389cd134ede63a5677553336830be7) chore(deps): bump google.golang.org/api from 0.65.0 to 0.68.0 (#1634) + +### Contributors + + * Aaron Weisberg + * Daniel Maizel + * David Collom + * Derek Wang + * Dillen Padhiar + * J.P. Zivalich + * Julie Vogelman + * Lawrence Carvalho + * Shilei Liu + * Sreekanth + * dependabot[bot] + * descrepes + * itamarom + * wangyi + +## v1.6.3 (2022-03-03) + + * [6f7f9500](https://github.com/argoproj/argo-events/commit/6f7f95006e040af30ed2cf40a3910891d2c50a4d) Update manifests to v1.6.3 + * [55f1fd02](https://github.com/argoproj/argo-events/commit/55f1fd0265f6a973330cd4d5c02c75121deb5256) fix: Quick fix for Issue #1694 (#1701) + * [707be7b0](https://github.com/argoproj/argo-events/commit/707be7b0651bcb9c51e2f1f07d89e3f8c5746272) Support configuring argo CLI args when using argo workflow trigger. (#1655) + +### Contributors + + * Derek Wang + * Julie Vogelman + * Lawrence Carvalho + +## v1.6.2 (2022-03-01) + + * [b5e2bcb1](https://github.com/argoproj/argo-events/commit/b5e2bcb143131beb336613fabaef2d89364c0697) Update manifests to v1.6.2 + * [1c6a5d42](https://github.com/argoproj/argo-events/commit/1c6a5d4286bc3d9ae422eda961da0cda60b2cc09) fix: changing lastResetTime to a time.Time from int64 seconds so time comparison can occur at a finer granularity (#1695) + +### Contributors + + * Derek Wang + * Julie Vogelman + +## v1.6.1 (2022-02-27) + + * [6f1aff65](https://github.com/argoproj/argo-events/commit/6f1aff6558b779f919a4c8dbe553d1eac9f8486a) Update manifests to v1.6.1 + * [3113eb89](https://github.com/argoproj/argo-events/commit/3113eb898caf8ac78ed81187c469d0e6c34a9053) fix: read kafka error channel. Fixes #1656 (#1664) + * [54c2ef07](https://github.com/argoproj/argo-events/commit/54c2ef07affa1539d4107277ae72ea569929e1a2) fix: remove string replacement on expressions for event filtering (#1670) + * [ba5d2c42](https://github.com/argoproj/argo-events/commit/ba5d2c422541d4baa978b0281191215918072333) feat: Adds argo stop as a supported operation for argoWorkflow triggers. 
+ * [1780d833](https://github.com/argoproj/argo-events/commit/1780d833c8717801a8a950c54212033552b2b7ae) chore(deps): bump github.com/xanzy/go-gitlab from 0.54.4 to 0.55.1 (#1642)
+ * [6d95f54b](https://github.com/argoproj/argo-events/commit/6d95f54b51978bd9594522505ddba15e93ab2850) fix: adds SigningCertURL validation for SNS messages (#1637)
+ * [40b2df63](https://github.com/argoproj/argo-events/commit/40b2df6368b5459ebe0846c4ca37ec7b6c660744) Add SQS Custom endpoint support (#1632)
+ * [6b6788e1](https://github.com/argoproj/argo-events/commit/6b6788e168d44d92c502f85a08ad096b0e79ccc0) chore(deps): bump github.com/cloudevents/sdk-go/v2 from 2.1.0 to 2.8.0 (#1635)
+ * [056aaa66](https://github.com/argoproj/argo-events/commit/056aaa664c518371e02611cfff687ca1e265b82e) chore(deps): bump google.golang.org/api from 0.65.0 to 0.68.0 (#1634)
+
+### Contributors
+
+ * Aaron Weisberg
+ * Derek Wang
+ * Dillen Padhiar
+ * Lawrence Carvalho
+ * dependabot[bot]
+ * itamarom
+ * wangyi
+
+## v1.6.0 (2022-02-12)
+
+ * [f81754da](https://github.com/argoproj/argo-events/commit/f81754da2b8df1dd628117be7bc8d2d5cf5f14c1) Update manifests to v1.6.0
+ * [b2936c19](https://github.com/argoproj/argo-events/commit/b2936c19fc9dd766acdd0a5b1d38d571e427aa37) fix: make sns-sensor example work (#1629)
+ * [f55c1748](https://github.com/argoproj/argo-events/commit/f55c1748e7dbcb2bb124e2ec43127f8ae18bb972) chore(deps): bump github.com/prometheus/client_golang (#1621)
+ * [6e976052](https://github.com/argoproj/argo-events/commit/6e97605216d15948dedb628cae4f6db1e62c33e7) chore(deps): bump github.com/aws/aws-sdk-go from 1.35.24 to 1.42.50 (#1617)
+ * [ea5b5a5b](https://github.com/argoproj/argo-events/commit/ea5b5a5b12176b1adc0caccf40c1800f9beb7e3f) feat (kafka-es): Implements SCRAM functionality and enables SCRAM-SHA512/256 SASL
+ * [097e87d8](https://github.com/argoproj/argo-events/commit/097e87d8c7e6db7e1036f0159041230fe15bb834) chore(deps): bump actions/setup-go from 2.1.5 to 2.2.0 (#1609)
+ * [8bea0754](https://github.com/argoproj/argo-events/commit/8bea07540a4541fd51979efe351997fde5c57887) fix: Garbage collection of the ValidatingWebhookConfiguration (#1603)
+ * [54fb550a](https://github.com/argoproj/argo-events/commit/54fb550a080c6ecba73a8bf3ea3b851de6e535b7) chore(deps): bump github.com/eclipse/paho.mqtt.golang (#1570)
+ * [968bddf7](https://github.com/argoproj/argo-events/commit/968bddf77ecba154bb9187a176eb6b35294c9500) chore(deps): bump github.com/go-swagger/go-swagger from 0.25.0 to 0.29.0 (#1555)
+ * [993fb2d1](https://github.com/argoproj/argo-events/commit/993fb2d12e2c69d709d4d33e850768e5ae6ef4a4) username, password authentication doesn't work when the AMQPLAIN authentication mechanism is used; use the PLAIN mechanism instead, as the former is non-standard according to https://www.rabbitmq.com/access-control.html (#1600)
+ * [b5f75ca8](https://github.com/argoproj/argo-events/commit/b5f75ca85be6289abe6ca24b4e4c269487be32cf) chore(deps): bump github.com/go-redis/redis (#1583)
+ * [dc9ca54b](https://github.com/argoproj/argo-events/commit/dc9ca54b1d3016e3cb3478b8e0d239267184408f) chore(deps): bump actions/setup-python from 2.3.1 to 2.3.2 (#1591)
+ * [5d25d7e5](https://github.com/argoproj/argo-events/commit/5d25d7e586bce8770cf7b9d737d157919f61e28c) feat: added filtering feature for EventSource (#1582)
+ * [9606aed2](https://github.com/argoproj/argo-events/commit/9606aed21f899c53f7d0678a9f3b3a2a01ce8a42) fix: conditions reset does not work if the service is down at the triggering time (#1585)
+ * [c0a28a6e](https://github.com/argoproj/argo-events/commit/c0a28a6e46510ed495b2be96cd3c07df7afd9ab8) fix: Update Github EventSource Example to include port name (#1590)
+ * [0b64436c](https://github.com/argoproj/argo-events/commit/0b64436cb813c98664a7758f99164c0ca6fe4719) chore(deps): bump github.com/xanzy/go-gitlab from 0.50.2 to 0.54.4 (#1586)
+ * [fb75dcac](https://github.com/argoproj/argo-events/commit/fb75dcacc247bc3459ab1a9129ec3023bcb8ae8a) chore(deps): bump github.com/Shopify/sarama from 1.26.1 to 1.31.1 (#1584)
+ * [00f2ca47](https://github.com/argoproj/argo-events/commit/00f2ca47e293ce9ddb4318d493eeefe3676bd91e) chore(deps): bump github.com/spf13/viper from 1.10.0 to 1.10.1 (#1575)
+ * [7998befd](https://github.com/argoproj/argo-events/commit/7998befdab0b1a6f52f9e3460c104b4bc265694b) chore(deps): bump cloud.google.com/go/compute from 0.1.0 to 1.1.0 (#1580)
+ * [4cbce311](https://github.com/argoproj/argo-events/commit/4cbce31101739d9c5e33a25ee62bae9dd4655e76) fix: e2e test not recognizing .kube/config (#1581)
+ * [12d311d9](https://github.com/argoproj/argo-events/commit/12d311d926eb188b62d0fb522a8c81db0a142950) chore(deps): bump github.com/go-resty/resty/v2 from 2.3.0 to 2.7.0 (#1574)
+ * [7dd098dc](https://github.com/argoproj/argo-events/commit/7dd098dce50574817ae3b0f82628923c98534e57) chore(deps): bump google.golang.org/grpc from v1.42.0 to v1.43.0 (#1579)
+ * [455f4ab5](https://github.com/argoproj/argo-events/commit/455f4ab5a4f366f1821479f5c060a5ed6bccd14b) chore(deps): bump go.uber.org/zap from 1.19.0 to 1.20.0 (#1573)
+ * [e61e0539](https://github.com/argoproj/argo-events/commit/e61e0539c6c5c36e7eecbebc4cfec0a686d8bfe2) chore(deps): bump github.com/nats-io/stan.go from 0.6.0 to 0.10.2 (#1559)
+ * [c49d6969](https://github.com/argoproj/argo-events/commit/c49d696932af0873c7cfbd4680618fc1e856432a) chore(deps): bump github.com/nsqio/go-nsq from 1.0.8 to 1.1.0 (#1569)
+ * [725d8cee](https://github.com/argoproj/argo-events/commit/725d8cee5f88f0bf3b599747786e7a5050dc3c94) chore(deps): bump github.com/mitchellh/mapstructure from 1.4.1 to 1.4.3 (#1560)
+ * [6e57f135](https://github.com/argoproj/argo-events/commit/6e57f135e28f9003821f9e7028fd423a0d7248ac) chore(deps): bump github.com/spf13/cobra from 1.2.1 to 1.3.0 (#1562)
+ * [c73c85ca](https://github.com/argoproj/argo-events/commit/c73c85cac16e6f957ffba1efcddbd0df4ab48154) chore(deps): bump github.com/go-openapi/spec from 0.20.2 to 0.20.4 (#1558)
+ * [bc90c77d](https://github.com/argoproj/argo-events/commit/bc90c77d704e1e2188cdcc7f658cf540e61efbb3) chore(deps): bump google.golang.org/api from 0.44.0 to 0.65.0 (#1557)
+ * [f2234fb9](https://github.com/argoproj/argo-events/commit/f2234fb99f81d23b7e15663451cb3beb580a76e1) chore(deps): bump github.com/google/go-cmp from 0.5.6 to 0.5.7 (#1556)
+ * [bef51660](https://github.com/argoproj/argo-events/commit/bef51660bfcee6d204005c7c98da4396e7bbb39d) chore(deps): bump github.com/smartystreets/goconvey from 1.6.4 to 1.7.2 (#1554)
+ * [b08ac6b1](https://github.com/argoproj/argo-events/commit/b08ac6b1bcf72da85ab4ae0abab115e9de306597) chore(deps): bump actions/stale from 3 to 4.1.0 (#1553)
+ * [dda87394](https://github.com/argoproj/argo-events/commit/dda87394aee1365f715264db09ab89c0e44c4ae7) chore(deps): bump actions/setup-python from 1 to 2.3.1 (#1552)
+ * [82989838](https://github.com/argoproj/argo-events/commit/82989838a1a7c9dd3d4d9696c9ca54700cca81de) chore(deps): bump actions/cache from 1 to 2.1.7 (#1551)
+ * [9ebe4eec](https://github.com/argoproj/argo-events/commit/9ebe4eec54208c4640239e4cac15908e0fd04613) chore(deps): bump actions/setup-go from 1 to 2.1.5 (#1550)
+ * [b963de56](https://github.com/argoproj/argo-events/commit/b963de5607466a94bdc631d4f470d92913882178) feat: overall improve filtering documentation (#1508)
+ * [aca3c692](https://github.com/argoproj/argo-events/commit/aca3c692fd1475c5491a30e883cccb94e1addd32) Support multiple repos bitbucketserver (#1540)
+ * [d7034743](https://github.com/argoproj/argo-events/commit/d703474329954b0439a42e6fd50b5febe58b229e) fix: removed "limitation" from outdated doc (#1535)
+ * [85af547f](https://github.com/argoproj/argo-events/commit/85af547f8d80ec4ddbf4113e16d42cd73e7d9c9e) feat(eventsource): add optional arguments for amqp eventsource (#1501)
+ * [8cfbe2af](https://github.com/argoproj/argo-events/commit/8cfbe2afffd07582b5ffd54df61cb65386811054) feat: Add optional security context for event bus container template (#1519)
+ * [18ca4a13](https://github.com/argoproj/argo-events/commit/18ca4a1316886f7f984bfc13e1cced412aca197a) fix(docs): about event transform release date (#1529)
+ * [fb5c2572](https://github.com/argoproj/argo-events/commit/fb5c25724e51096bb041fe533be9eedbb77cea9b) fix: conditions reset honors the latest timestamp of all unacked msgs (#1523)
+ * [4153c388](https://github.com/argoproj/argo-events/commit/4153c388f6c4839e0f0fb26b5f0109f871d589aa) fix: upgrade dependencies for security alert (#1518)
+ * [15bc1b6c](https://github.com/argoproj/argo-events/commit/15bc1b6c043646aec62ba8774ee4cb122fd5536b) fix: let the service restart after leader switched (#1513)
+ * [fe62b2f2](https://github.com/argoproj/argo-events/commit/fe62b2f29f4314e6f0ba9f16ad1b149b48b3c25e) fix: Sensor triggers an Argo Workflow with wrong name (#1511)
+ * [f91e0590](https://github.com/argoproj/argo-events/commit/f91e05905ae55094912ff1153b80fd3736db7727) feat: Improved Sensors filtering feature (aka Sensors filtering v2) (#1490)
+ * [faa69c73](https://github.com/argoproj/argo-events/commit/faa69c73ac370c16226270fcf81b39a97472d48c) Modified deprecated projectID field of Gitlab Eventsource to be optional (#1500)
+ * [849a9ffb](https://github.com/argoproj/argo-events/commit/849a9ffb74cc402d20dedf0b57380bda4f09bb71) Bitbucket Cloud Eventsource (#1493)
+ * [cd54e79b](https://github.com/argoproj/argo-events/commit/cd54e79b4db924ff577c9227260d5889b991222e) Update parameters in minio example (#1414)
+ * [5f235819](https://github.com/argoproj/argo-events/commit/5f235819c938f2d17aff636d7ec3a460dcca07eb) feat: added event transform (#1492)
+ * [8f3fdf08](https://github.com/argoproj/argo-events/commit/8f3fdf08a10c88cf48134528e8c9a9edd3dd8afb) fix: int-or-string k8s type in jsonschema (#1466)
+ * [3626aec5](https://github.com/argoproj/argo-events/commit/3626aec505d3279414c56ad35180f87c63cf9a5b) fix: changed method call in event-sources/sources/kafka/start.go (#1457)
+ * [cdab1e35](https://github.com/argoproj/argo-events/commit/cdab1e353c19cff6406498d72aa5362f108c3be1) fix: Ack handled messages in pulsar eventsource (#1455)
+ * [b4c3b5ce](https://github.com/argoproj/argo-events/commit/b4c3b5ce8c00c0541f831ff2c5e5492f0ef71fdf) feat: Adding support to propagate unique ID to message for Kafka EventSource messages (#1453)
+ * [7cecd3cf](https://github.com/argoproj/argo-events/commit/7cecd3cf6a345dbc1e84f1beb9c5c206574a8239) feat: Add GitHub App creds support (#1438)
+ * [2168576a](https://github.com/argoproj/argo-events/commit/2168576add93e789966ee0de091042af231a2af5) fix: Added plural exception for eventbus resource for k8s trigger (#1440)
+ * [5b9a0db6](https://github.com/argoproj/argo-events/commit/5b9a0db69b44cfe34b2564759c514d34fe3d607e) Update USERS.md (#1437)
+ * [7ad60278](https://github.com/argoproj/argo-events/commit/7ad602783e590898909ac4c6682c5a66c6b21f0e) fix: Creation of ingress resource with k8s trigger (#1434)
+ * [5c50e1b7](https://github.com/argoproj/argo-events/commit/5c50e1b7de2aefa32731d2abdfb3fc0ade3d326b) fix: Eval of params with missing src dep and no default value (#1433)
+ * [6d5be342](https://github.com/argoproj/argo-events/commit/6d5be34281cd3fca0b9f66b59829c5c44589829c) fix: Don't wait for context when listening fails (#1428)
+ * [878c8d18](https://github.com/argoproj/argo-events/commit/878c8d18c86c3534d08518f30760cfbead0514e7) fix: Marked optional fields in github and calendar eventsources (#1421)
+ * [45abcb99](https://github.com/argoproj/argo-events/commit/45abcb99aab8013e5154ce698db5a764ca4a47a4) feat: Added org level webhook support for github es (#1412)
+ * [49166c4b](https://github.com/argoproj/argo-events/commit/49166c4b7dfc8b3ac601ea1fcd39b57c8f57919d) fix: Update minio-go to v7 (#1411)
+ * [fcc236eb](https://github.com/argoproj/argo-events/commit/fcc236eb7ca36bf183f910fa22fde9a8800fe230) fix: fatal the services if starting failed (#1405)
+ * [aff2726d](https://github.com/argoproj/argo-events/commit/aff2726d6a5229f74773cc5b977a9f8123bfaf01) fix: resource eventsource duplicate update events. Fixes #760 (#1025) (#1404)
+ * [4a1bf22d](https://github.com/argoproj/argo-events/commit/4a1bf22d82cdedb8c4b4f12e819244587a5ae432) feat: Added dlq option to aws-sqs eventsource spec (#1403)
+ * [73bafd4e](https://github.com/argoproj/argo-events/commit/73bafd4ebb544fb0955a58ff7a847a1e12e422d7) feat: Added support for maxPayload, raftHeartbeatTimeout, raftElectionTimeout, raftLeaseTimeout, raftCommitTimeout native nats settings (#1402)
+ * [5212bad9](https://github.com/argoproj/argo-events/commit/5212bad9b32dc4a6f4aace4030acc44c430a4ba9) feat: Added json schema generation (#1398)
+ * [e472dae7](https://github.com/argoproj/argo-events/commit/e472dae7b1514b141457365153e65a163d52e5f4) feat: Sensor/AWSLambdaTrigger: allow using AWS IAM Role specified in Service Account (#1394)
+ * [65c49d1d](https://github.com/argoproj/argo-events/commit/65c49d1d031ab9ef9c7edaf4d9f0618273cb733f) feat: trigger conditions reset. Closes #1381 (#1392)
+ * [a67558ea](https://github.com/argoproj/argo-events/commit/a67558eac349ed4fd47533c998c1160986382299) fix typo (#1385)
+ * [79c472de](https://github.com/argoproj/argo-events/commit/79c472ded3b11a7c5cf6b10e5d5ffd934f3b8175) fix: sensor controller validate sensor (#1378)
+
+### Contributors
+
+ * Arnar
+ * Capable-Disk4147
+ * Christopher Cutajar
+ * Daniel
+ * Daniel Habib
+ * Daniel Maizel
+ * Derek Wang
+ * Dillen Padhiar
+ * Julie Vogelman
+ * Krzysztof Romanowski
+ * Luca Iachini
+ * Matteo Baiguini
+ * Michael Seiwald
+ * Muhammad Hamza Zaib
+ * Nguyen Duy Tho
+ * Vaibhav
+ * William Van Hevelingen
+ * dependabot[bot]
+ * dubeau
+ * sharon-codefresh
+ * usamaB
+
+## v1.5.6 (2022-01-11)
+
+ * [8a5db8d0](https://github.com/argoproj/argo-events/commit/8a5db8d0732e98c8b4a60adaca81e47100096f77) Update manifests to v1.5.6
+ * [64621a5d](https://github.com/argoproj/argo-events/commit/64621a5da4b3dac58f439839a65e22791d4a7432) fix: let the service restart after leader switched (#1513)
+ * [57bdb37a](https://github.com/argoproj/argo-events/commit/57bdb37a23f5cd305b29297fc59da2034e012313) feat: Add GitHub App creds support (#1438)
+ * [b1dc32c9](https://github.com/argoproj/argo-events/commit/b1dc32c93153c9bb80866613fba733cd5820614b) fix: Sensor triggers an Argo Workflow with wrong name (#1511)
+
+### Contributors
+
+ * Daniel
+ * Derek Wang
+
+## v1.5.5 (2021-12-18)
+
+ * [3bfd7f9f](https://github.com/argoproj/argo-events/commit/3bfd7f9fd58152fc86b4533fef6912289e4ef39e) Update manifests to v1.5.5
+ * [8342f67d](https://github.com/argoproj/argo-events/commit/8342f67d4b3e2dcb8c6e64e9cfafdcd5f8d71d98) feat: Sensor/AWSLambdaTrigger: allow using AWS IAM Role specified in Service Account (#1394)
+
+### Contributors
+
+ * Derek Wang
+ * Krzysztof Romanowski
+
+## v1.5.4 (2021-12-10)
+
+ * [ad7f7fa7](https://github.com/argoproj/argo-events/commit/ad7f7fa7c80706a94275c0d566cf73d15f2cff37) Update manifests to v1.5.4
+ * [6f0d4fcc](https://github.com/argoproj/argo-events/commit/6f0d4fccb43f4b224d7aa66aeb05daa3b95a2ec5) fix: changed method call in event-sources/sources/kafka/start.go (#1457)
+ * [76777a71](https://github.com/argoproj/argo-events/commit/76777a71382b310e0d945c37629f5ebd746147e3) feat: Adding support to propagate unique ID to message for Kafka EventSource messages (#1453)
+ * [202e50eb](https://github.com/argoproj/argo-events/commit/202e50eb6030a761f9200d7910223b5ebfa8bb62) fix: Update minio-go to v7 (#1411)
+ * [d4464372](https://github.com/argoproj/argo-events/commit/d4464372d829e62a0967deeaa3b830de9ac13979) fix: Ack handled messages in pulsar eventsource (#1455)
+ * [29b1182c](https://github.com/argoproj/argo-events/commit/29b1182c59912bf3f334b03f47ddc9641e013a5a) fix: resource eventsource duplicate update events. Fixes #760 (#1025) (#1404)
+
+### Contributors
+
+ * Arnar
+ * Derek Wang
+ * Dillen Padhiar
+ * Michael Seiwald
+ * dubeau
+
+## v1.5.3 (2021-11-22)
+
+ * [fd398f1d](https://github.com/argoproj/argo-events/commit/fd398f1d76c01d8a549ba5d84d829ce956b281d3) Update manifests to v1.5.3
+ * [9663255c](https://github.com/argoproj/argo-events/commit/9663255caff567277759e10ffc62c632589d375c) fix: Creation of ingress resource with k8s trigger (#1434)
+ * [bbef0bbe](https://github.com/argoproj/argo-events/commit/bbef0bbe683f7ee65452994f623f473b66fbedd4) fix: Eval of params with missing src dep and no default value (#1433)
+
+### Contributors
+
+ * Daniel
+ * Derek Wang
+
+## v1.5.2 (2021-11-09)
+
+ * [aa6bd169](https://github.com/argoproj/argo-events/commit/aa6bd169e8ccb28865462d5677b2314f18eef974) Update manifests to v1.5.2
+ * [84939d6a](https://github.com/argoproj/argo-events/commit/84939d6a61e607c71d273f04f7172a4fdfe9455c) fix: fatal the services if starting failed (#1405)
+
+### Contributors
+
+ * Derek Wang
+
+## v1.5.1 (2021-11-07)
+
+ * [f1c013b8](https://github.com/argoproj/argo-events/commit/f1c013b8201db90a81604a87cca74a6c80b3456b) Update manifests to v1.5.1
+ * [3a0f77f6](https://github.com/argoproj/argo-events/commit/3a0f77f6d54fc6af39536a4d6fa90850c0f0f466) feat: trigger conditions reset. Closes #1381 (#1392)
+ * [ee13f505](https://github.com/argoproj/argo-events/commit/ee13f5057cc5a0a2763441469fec6ced920f5ee8) fix: sensor controller validate sensor (#1378)
+
+### Contributors
+
+ * Derek Wang
+ * Luca Iachini
+
+## v1.5.0 (2021-10-12)
+
+ * [e7dcd3d2](https://github.com/argoproj/argo-events/commit/e7dcd3d21925e80561d7f82a2dc45b213a618d0b) Update manifests to v1.5.0
+ * [327bdf74](https://github.com/argoproj/argo-events/commit/327bdf74a7f3dce5f1e87412ce5a1c406ff92e9c) feat: Generate CHANGELOG.md automatically (#1368)
+ * [a1af470f](https://github.com/argoproj/argo-events/commit/a1af470f7b6de34f5d899c9a203d262ec6720cd1) fix: disable bool simplifier due to performance issue. Fixes #1339 (#1363)
+ * [d556788e](https://github.com/argoproj/argo-events/commit/d556788e04971f74ef90fbca0058819eca3243df) fix(example): url sensor argo-workflow link (#1362)
+ * [42fe43e7](https://github.com/argoproj/argo-events/commit/42fe43e724e7c8985228a4abff404c1856617d74) feat: Adding support for token authentication in Pulsar EventSource (#1359)
+ * [10321c7d](https://github.com/argoproj/argo-events/commit/10321c7d8f5200ba4c2ed630e24ef1c94290d78d) fix:(doc): quick start link (#1358)
+ * [74205b07](https://github.com/argoproj/argo-events/commit/74205b07360a9ddd47032416eff031c8db1fb10a) feat: Adding Pulsar sensor (#1356)
+ * [f10e80cc](https://github.com/argoproj/argo-events/commit/f10e80cc97d568b9c345cdc7719d2cd8899c9e56) feat: support for PubSub emulator (#1343)
+ * [38e4fb5e](https://github.com/argoproj/argo-events/commit/38e4fb5eacb20fffa480a12c4c8a859fe0ef3fd8) fix: mask auth secret. Fixes: #1336 (#1337)
+ * [ef7b0b2d](https://github.com/argoproj/argo-events/commit/ef7b0b2d552c3f9bcd258a7c2bd54d025f596806) feat(eventsource): Add urlSecret to AMQP event source (#1335)
+ * [24ef7c61](https://github.com/argoproj/argo-events/commit/24ef7c61daa071e40b2f4c84245096bb924b4ec9) feat: Removed usage of redundant GVR fields in k8s/workflow triggers (#1333)
+ * [a6e23a9b](https://github.com/argoproj/argo-events/commit/a6e23a9b80bb4082022bc0a30973f170d0480e1e) Fix: amqp allow empty exchange name (#1328)
+ * [27bad798](https://github.com/argoproj/argo-events/commit/27bad798cdc39c4579631eed464993259b12d2fa) fix: added service account creation step in quick start docs (#1324)
+ * [29c14506](https://github.com/argoproj/argo-events/commit/29c14506c3ba17ea35eb4d130830adbdb338e179) feat: expose image pull policy and upgrade stan to v0.22.1 (#1325)
+ * [dde50f2a](https://github.com/argoproj/argo-events/commit/dde50f2a069a7a083f90b82adcec22aa307ae5f1) fix: Added cluster resources support in k8s trigger (#1323)
+ * [d8cf6e02](https://github.com/argoproj/argo-events/commit/d8cf6e0249edbd6d88e161f9ebab05520c28465e) fix(eventbus): allow clients with istio sidecar. Fixes: #1311 (#1312)
+ * [4ba6543f](https://github.com/argoproj/argo-events/commit/4ba6543f9e3ec4d2b340ce5a8618da401b5244b1) feat: trigger rate limit. Closes: #1087 (#1318)
+ * [55a992be](https://github.com/argoproj/argo-events/commit/55a992bed084c7921a87ae1ab434fe995df4f987) fix: correct field name in error message (#1317)
+ * [feeaf5d0](https://github.com/argoproj/argo-events/commit/feeaf5d0420f6949ccaf1904a8dcd58c65a176ac) feat: add eventsource support for Bitbucket Server. Fixes #891 (#1223)
+ * [bd737904](https://github.com/argoproj/argo-events/commit/bd7379042ce2172f9e29f5bb3af6c3f44da7e1d5) fix: Upgrade pkg to v0.10.1 (#1305)
+ * [c6725a9f](https://github.com/argoproj/argo-events/commit/c6725a9fa313ea29afc19687cb1d102244362cc8) feat(eventsource): gitlab to support multiple projects (#1297)
+ * [d19cb22c](https://github.com/argoproj/argo-events/commit/d19cb22c4b67dcd993c362b9c6310f29b035037a) fix(docs): add missing dataKey for examples (#1286)
+ * [181198ae](https://github.com/argoproj/argo-events/commit/181198aeb4c220bddbb64df1dc2a107a64c08976) docs(users): Add WooliesX (#1281)
+ * [fa60ca0c](https://github.com/argoproj/argo-events/commit/fa60ca0c854991b41924416054e51b1e3f86784c) fix trigger dependencies yaml (#1276)
+
+### Contributors
+
+ * Alex Collins
+ * Andrew VanderVeen
+ * Antonio Macías Ojeda
+ * Arnar
+ * Daniel
+ * Derek Wang
+ * Stephan van Maris
+ * Tianchu Zhao
+ * Windfarer
+ * hodbn
+ * makocchi
+ * roi-codefresh
+
+## v1.4.3 (2021-09-29)
+
+ * [f7916206](https://github.com/argoproj/argo-events/commit/f79162063b9f372d55e7cdddabcade3efdad01ac) Update manifests to v1.4.3
+ * [86183f0e](https://github.com/argoproj/argo-events/commit/86183f0ec50cd2f1d48a8e3d190f52d23962aea0) fix: disable bool simplifier due to performance issue. Fixes #1339 (#1363)
+
+### Contributors
+
+ * Derek Wang
+
+## v1.4.2 (2021-09-21)
+
+ * [9ab2208e](https://github.com/argoproj/argo-events/commit/9ab2208e7347b80503ba60197b8c2513c3b8f4ef) Update manifests to v1.4.2
+ * [0e86bbed](https://github.com/argoproj/argo-events/commit/0e86bbedca880b6acecfb8690efb0600707beca9) fix: mask auth secret. Fixes: #1336 (#1337)
+
+### Contributors
+
+ * Derek Wang
+
+## v1.4.1 (2021-08-31)
+
+ * [8d95d1bb](https://github.com/argoproj/argo-events/commit/8d95d1bb768e1faf571a400c04c5a1b8fed594ff) Update manifests to v1.4.1
+ * [9cac83ab](https://github.com/argoproj/argo-events/commit/9cac83ab5ec62cf214fe2125d58f1277d4c6c0b0) feat: expose image pull policy and upgrade stan to v0.22.1 (#1325)
+
+### Contributors
+
+ * Derek Wang
+
+## v1.4.0 (2021-07-12)
+
+ * [0e7a0b4a](https://github.com/argoproj/argo-events/commit/0e7a0b4a49dfcdc8e1d32c8db1267d916b3d8f2c) Update manifests to v1.4.0
+ * [b38bed14](https://github.com/argoproj/argo-events/commit/b38bed14859b0caed6c17c04243042107257ae16) feat: customize maxBytes and maxMsgs for EventBus (#1272)
+ * [ce67b385](https://github.com/argoproj/argo-events/commit/ce67b3858b91c48914d1a3ccc900d539cc31a66f) fix: duplicate trigger name validation. Fixes #1262 (#1263)
+ * [37d5d0dc](https://github.com/argoproj/argo-events/commit/37d5d0dc74e4f24024edee2dde00d0926e8415a7) feat: add terminate event (#1268)
+ * [3f7a6285](https://github.com/argoproj/argo-events/commit/3f7a6285c425d5f9077d0ee3df6c7ff1a6ed52af) Update 03-trigger-sources.md (#1264)
+ * [b5e5d8e8](https://github.com/argoproj/argo-events/commit/b5e5d8e8b3186660e3f456ed3c36919a10d96acf) add link to examples and adjust punctuation (#1256)
+ * [38cd9b64](https://github.com/argoproj/argo-events/commit/38cd9b648b257a007ed29d53d444d482afdfb503) fix: default param value not applied in some cases (#1254)
+ * [e92d1056](https://github.com/argoproj/argo-events/commit/e92d1056b655437cc759d4ea45134ceb4e3d237b) feat: amqp event source authentication (#1252)
+ * [d403c441](https://github.com/argoproj/argo-events/commit/d403c441bc1d4032daff4e54b496f9342cc5cd57) fix(ci): release action (#1251)
+ * [2f907b14](https://github.com/argoproj/argo-events/commit/2f907b1453809c3d53b1b166d26cc54104fde097) feat: Introduce expr filters (#1226)
+ * [7a489d1e](https://github.com/argoproj/argo-events/commit/7a489d1ebddffcc9226a6de04dad94bd8fdf80e7) Secure headers for HTTP Trigger (#1229)
+ * [6c2d91bb](https://github.com/argoproj/argo-events/commit/6c2d91bb2e4e174927a1d567beaf2aa32c30d392) fix: Kafka/SASL Verbiage cleanup and examples (#1220)
+ * [76d16eb0](https://github.com/argoproj/argo-events/commit/76d16eb0a4cf3b26141d4598cc43b81ef3cf643a) fix: limit slack API calls (#1207)
+ * [c8e2dfd7](https://github.com/argoproj/argo-events/commit/c8e2dfd73adc248645685fc84b9d0b4e6d69dd00) fix(ci): fix env name for quay.io (#1211)
+ * [b44128a2](https://github.com/argoproj/argo-events/commit/b44128a26397f812ef43b60396d19c03d2f758de) docs(argo-events): Add link to FAQ on debug (#1210)
+ * [cb3f20d6](https://github.com/argoproj/argo-events/commit/cb3f20d654e536ed1a14e00e7ca564fbc57643c5) docs(argo-events): Added example with debug log enabled (#1208)
+ * [b9fb2a29](https://github.com/argoproj/argo-events/commit/b9fb2a292e3ccfdf7547ea3419f3ff8cce1cdaec) docs(argo-events): Added helpful information to FAQ (#1209)
+ * [08b4fedd](https://github.com/argoproj/argo-events/commit/08b4fedd8d7464ed2820fe82c7568d0a718d9fbf) fix: retry duration recognize strings. Fixes #1200 (#1201)
+ * [bd45e0a6](https://github.com/argoproj/argo-events/commit/bd45e0a6d3de188e8b65db7f25ec6f97b880ce71) fix: Always mount tmp emptyDir. Fixes #1194 (#1196)
+ * [a5f1988a](https://github.com/argoproj/argo-events/commit/a5f1988ae70819acde90ceb2572d275e4cdfdb1d) fix: typo in readme (#1193)
+ * [41027b5e](https://github.com/argoproj/argo-events/commit/41027b5e814c4387778e3b0d9252f8e5330fa4e7) feat: kafka sasl auth (#1186)
+ * [8f96b097](https://github.com/argoproj/argo-events/commit/8f96b097242b8380de84c9ee670923746e962f61) feat(github-eventsource): Support multi repos and mitigate race conditions (#1181)
+ * [c71d10dd](https://github.com/argoproj/argo-events/commit/c71d10dd1a6436b2fe8359502848bcd86f986af7) feat(lambda-trigger): expose Lambda invocation type. Closes: #994 (#1187)
+ * [79cae5c1](https://github.com/argoproj/argo-events/commit/79cae5c1edfa85397c61b397b15f1ce8cefabff9) fix: Fast fail invalid SNS notification. Fixes: #1182 (#1185)
+ * [c162d9a4](https://github.com/argoproj/argo-events/commit/c162d9a475281b90b6a1b615f9473ee03a338bb0) feat: enable affinity for Sensor (#1176)
+ * [910156f0](https://github.com/argoproj/argo-events/commit/910156f027b45edc54d065e2221a2a53ad535d07) Update link in 02-parameterization.md (#1174)
+ * [5cd535b2](https://github.com/argoproj/argo-events/commit/5cd535b22a2e8fb1acf9e206055df5ca8b85a8f9) feat: EventSource and Sensor HA without extra RBAC (#1163)
+ * [1efd3def](https://github.com/argoproj/argo-events/commit/1efd3defab40e4a6a32a3bc382779f714d68ca76) fix: Backoff retry should not swallow errors. Fixes #1166 (#1167)
+ * [08b59611](https://github.com/argoproj/argo-events/commit/08b596113aec6fa165f3d0384e55b51906f0708e) Added Produvar to USERS.md (#1122)
+ * [9289f476](https://github.com/argoproj/argo-events/commit/9289f476dbbe199059695e9dc4049e30e9998804) feat: HA support for event sources and sensors (#1158)
+ * [e8aaa58c](https://github.com/argoproj/argo-events/commit/e8aaa58c5f54932ac08f9ac939c0142a15286d31) fix(resource-eventsource): Use event time instead of obj create time to filter UPDATE/DELETE events (#1157)
+ * [63d6cd94](https://github.com/argoproj/argo-events/commit/63d6cd942bb8d6a330099b25e6076a3d1d5db323) feat: use crypto/rand to generate event bus token (#1149)
+ * [2fa5185b](https://github.com/argoproj/argo-events/commit/2fa5185b03444bcb1b573fba6ed7abfc6010053c) feat: azure event hubs trigger (#1140)
+ * [9340b982](https://github.com/argoproj/argo-events/commit/9340b98277a0bc01fd158d7a558e6f354f1715b3) fix: Trim newline of the content read from Secrets or Configmaps (#1146)
+ * [6b5b345b](https://github.com/argoproj/argo-events/commit/6b5b345bf786e33d8195350f657038f01658743b) fix(docs) gitlab -> github (#1145)
+ * [e1b45807](https://github.com/argoproj/argo-events/commit/e1b4580775761429398e9abab43fb75850875ee6) fix(stress-testing): only check total requests when it is specified (#1135)
+ * [08f5d947](https://github.com/argoproj/argo-events/commit/08f5d9475ca0746738dadccf92a4abb14f79e55b) fix(git-sensor): force fetch from git repository
+
+### Contributors
+
+ * AJ Bowen
+ * Alec Rajeev
+ * Derek Wang
+ * Jaga Santagostino
+ * Joshua Jorel Lee
+ * KeisukeYamashita
+ * Luis Magana
+ * Niek Oost
+ * Rob K
+ * Rory LaMendola
+ * Vaibhav
+ * cbuckley01
+ * meijin
+ * orbatschow
+
+## v1.3.1 (2021-05-04)
+
+ * [f47cb23c](https://github.com/argoproj/argo-events/commit/f47cb23c421fb8b423114b028d5da5efd4e39de4) Update manifests to v1.3.1
+ * [6f9823f4](https://github.com/argoproj/argo-events/commit/6f9823f44d021a58e727a841c0a1ec436732dcfb) fix: Always mount tmp emptyDir. Fixes #1194 (#1196)
+ * [071ca70f](https://github.com/argoproj/argo-events/commit/071ca70faf1e7e36364e4b0af8ae5f17cd12d56c) fix: typo in readme (#1193)
+ * [ebc1fac9](https://github.com/argoproj/argo-events/commit/ebc1fac99467eab44f5c7854552c81822108baae) feat(github-eventsource): Support multi repos and mitigate race conditions (#1181)
+ * [69c36cde](https://github.com/argoproj/argo-events/commit/69c36cde2df592701a0346a88e9716ade464229c) fix: Fast fail invalid SNS notification. Fixes: #1182 (#1185)
+
+### Contributors
+
+ * Derek Wang
+ * KeisukeYamashita
+
+## v1.3.0 (2021-04-12)
+
+ * [75911469](https://github.com/argoproj/argo-events/commit/7591146915e0a6e413484eff83e6762a9fd3f34a) Update manifests to v1.3.0
+ * [8fd9ae72](https://github.com/argoproj/argo-events/commit/8fd9ae72740ce3b2015cc8c11fa55caa5ea236e3) feat: enable affinity for Sensor (#1176)
+ * [f752bc09](https://github.com/argoproj/argo-events/commit/f752bc09a8029f6fccdecdb453b185f85a248736) Update link in 02-parameterization.md (#1174)
+
+### Contributors
+
+ * Derek Wang
+ * Niek Oost
+
+## v1.3.0-rc4 (2021-04-07)
+
+ * [e1aabf95](https://github.com/argoproj/argo-events/commit/e1aabf958242f0d48e4ac26cb818bee5731200a1) Update manifests to v1.3.0-rc4
+ * [0435122a](https://github.com/argoproj/argo-events/commit/0435122a3e9dd0b5f98057862194450f768135cb) feat: EventSource and Sensor HA without extra RBAC (#1163)
+ * [4b818d5f](https://github.com/argoproj/argo-events/commit/4b818d5ff6369c94266961122d0e86bf2a2883fd) fix: Backoff retry should not swallow errors. Fixes #1166 (#1167)
+
+### Contributors
+
+ * Derek Wang
+
+## v1.3.0-rc3 (2021-04-05)
+
+ * [ccc01085](https://github.com/argoproj/argo-events/commit/ccc010851f0571d60241eba7fcd38186e4172d20) Update manifests to v1.3.0-rc3
+ * [b3bca624](https://github.com/argoproj/argo-events/commit/b3bca624e1b88cdcf7793ce910c23f4c3dce3889) Added Produvar to USERS.md (#1122)
+ * [85c4157c](https://github.com/argoproj/argo-events/commit/85c4157cfd7d20b92a6cde1a9ae45f53a43cc630) feat: HA support for event sources and sensors (#1158)
+ * [f945fb5f](https://github.com/argoproj/argo-events/commit/f945fb5f574da50b1877062db80d86688e9524a6) fix(resource-eventsource): Use event time instead of obj create time to filter UPDATE/DELETE events (#1157)
+
+### Contributors
+
+ * Derek Wang
+ * Niek Oost
+
+## v1.3.0-rc2 (2021-03-31)
+
+ * [71f3744b](https://github.com/argoproj/argo-events/commit/71f3744b4deaac1cfa2ffc7317f7364c05cfb5b9) Update manifests to v1.3.0-rc2
+ * [1d047c7f](https://github.com/argoproj/argo-events/commit/1d047c7fe6ff4d269e2acbc71f48a3e93e87acdb) feat: use crypto/rand to generate event bus token (#1149)
+ * [a23a50c8](https://github.com/argoproj/argo-events/commit/a23a50c8c6a72beda8ebef1b9c192f3f0ad8a151) feat: azure event hubs trigger (#1140)
+ * [d31cc640](https://github.com/argoproj/argo-events/commit/d31cc64046dde99ad231290abc61b9753865feec) fix: Trim newline of the content read from Secrets or Configmaps (#1146)
+ * [219780e3](https://github.com/argoproj/argo-events/commit/219780e357fba7146f65b9997851a099193af3d0) fix(docs) gitlab -> github (#1145)
+ * [21bc5bf6](https://github.com/argoproj/argo-events/commit/21bc5bf6342fa58ef06152758ba905243e272266) fix(stress-testing): only check total requests when it is specified (#1135)
+ * [3a2581dc](https://github.com/argoproj/argo-events/commit/3a2581dc41fd1e49bf0f5b2d985800161a4aa4fb) fix(git-sensor): force fetch from git repository
+
+### Contributors
+
+ * Derek Wang
+ * Jaga Santagostino
+ * Joshua Jorel Lee
+ * Rory LaMendola
+
+## v1.3.0-rc1 (2021-03-23)
+
+ * [460d5bd6](https://github.com/argoproj/argo-events/commit/460d5bd6859999efa15b68bbfd111f2cc91cc228) Update manifests to v1.3.0-rc1
+ * [35f136d2](https://github.com/argoproj/argo-events/commit/35f136d2e91b50678a686c527d8cbbd83ce0309f) fix(GithubEventSource): Compare events ignoring order and duplicate (#1124)
+ * [30737bab](https://github.com/argoproj/argo-events/commit/30737baba97c449ed516b700eafdfc3757b017ad) feat(sensor): add encoding decode in filter matching (#1123)
+ * [e93ed0ff](https://github.com/argoproj/argo-events/commit/e93ed0ff15cf8d5560e2ca1a0905a53a1e85ff9c) docs(user): add user to list (#1119)
+ * [1cc31c1d](https://github.com/argoproj/argo-events/commit/1cc31c1d3b7ea81fb6cf95dab06437885178f571) fix(SQS): Log errors with SQS connections. Fixes #1114 (#1115)
+ * [a3cc3ac2](https://github.com/argoproj/argo-events/commit/a3cc3ac244efa9b6b80331c24dec056fc83fb8df) Update link to Argo Workflows (#1116)
+ * [d2ee2b85](https://github.com/argoproj/argo-events/commit/d2ee2b85d51d7d9e88eaa94627c5be56df13a6c2) feat: metrics follow-up, latency of events processing in eventsource (#1101)
+ * [1382444e](https://github.com/argoproj/argo-events/commit/1382444e68f3df684cca48576ae38e787303dd8f) fix: Changed Result.Str to Result.String() (#1098)
+ * [a97dd9a7](https://github.com/argoproj/argo-events/commit/a97dd9a7067557e2eaf988da3877798fb46aaf73) feat: Ability to retry trigger (#1090)
+ * [35593c8a](https://github.com/argoproj/argo-events/commit/35593c8a69624236d09463ff202a396c8b35577e) fix: typo in README.md (#1093)
+ * [04068dd4](https://github.com/argoproj/argo-events/commit/04068dd49264b62476c3af0b68a176ea0ce179ef) feat: expose entire affinity field for native nats eventbus (#1083)
+ * [b72e00fe](https://github.com/argoproj/argo-events/commit/b72e00fe060e17fe0a14137d33af93140edf1fee) feat: enable Priority and PriorityClassName for CRD objects. (#1081)
+ * [8eed06b8](https://github.com/argoproj/argo-events/commit/8eed06b85339164e80a8a17449e8c062f0042ca8) feat: Expose Prometheus metrics. Closes #204 (#1047)
+ * [8b77d27e](https://github.com/argoproj/argo-events/commit/8b77d27e39af58a61215a02f52f5e1830c85e128) fix(sensor): Slack trigger should not join conversation for private channel (#1078)
+ * [6a169cb3](https://github.com/argoproj/argo-events/commit/6a169cb3201d4fcf2eafe838ec75019f03cb66ab) fix: tls config validation, follow up on #1070 (#1076)
+ * [dcda4225](https://github.com/argoproj/argo-events/commit/dcda422577759ae6cc96b30508dcc25633f66ee0) fix: make caCert, clientCert and clientKey optional for tls config (#1070)
+ * [95d08187](https://github.com/argoproj/argo-events/commit/95d0818723af0c0015a4f56c0e865680765b6c22) feat: Prevent EventBus with clients connected from being deleted (#1066)
+ * [d22f1ffd](https://github.com/argoproj/argo-events/commit/d22f1ffdc771eb3af35233265374b4b67ee303b0) feat(eventsource): Support NATS access with auth. Closes #1050 (#1052)
+ * [f6ba65d2](https://github.com/argoproj/argo-events/commit/f6ba65d2b37119f0156f58aa6a90c50c126ccda0) fix(docs): fix webhook installation url (#1053)
+ * [bf63a4df](https://github.com/argoproj/argo-events/commit/bf63a4df56462633d022f7f3b96a7376d3a4a76a) fix: Use different clientID for each EventBus reconnection. Fixes #1055 (#1061)
+ * [714b4651](https://github.com/argoproj/argo-events/commit/714b4651f9b86bcd219651d93315e494825d2959) fix(slack trigger): properly iterate api response (#1058)
+ * [58cc0930](https://github.com/argoproj/argo-events/commit/58cc0930603dfd4a685eb904e17610bbc2ca8637) feat: Introducing a Validating Admission Controller (#1021)
+ * [706b7468](https://github.com/argoproj/argo-events/commit/706b74682386fbd170c5631487a2051a7f5db149) feat: implement delete function for k8s objects (#1041)
+ * [0ebf9594](https://github.com/argoproj/argo-events/commit/0ebf95948fcd7d0f7e148272222bdd43d9e481c0) Adding the link to the video that explains Events (#1044)
+ * [6f199acf](https://github.com/argoproj/argo-events/commit/6f199acfb8cb59ee7dac86deabbf27dbca5e7493) fix: correcting the value of Types (#1038)
+ * [c0454aa4](https://github.com/argoproj/argo-events/commit/c0454aa4712f1ee6fcc50be9c31e37d3ca9eda63) fix(codegen): EventBus codegen plural issue introduced by k8s upgrade (#1037)
+ * [24a0115a](https://github.com/argoproj/argo-events/commit/24a0115ac359a33d2237cf8c87183a76aa63ab25) fix: Not able to send message to slack private channel (#1036)
+ * [4c93e07e](https://github.com/argoproj/argo-events/commit/4c93e07e5f61f363a0bafd2ccd566d0d91bdcff5) fix(eventbus): set nats routes with pod DNS names. Fixes #1026 (#1033)
+ * [74b9fc24](https://github.com/argoproj/argo-events/commit/74b9fc242f28b024846d3f1e4b8289fbd5bb4c43) feat: use status resources for controller reconciliation. Closes #1029 (#1030)
+ * [f9d2cc04](https://github.com/argoproj/argo-events/commit/f9d2cc04120d1ad842681ce02ecf8c596bad26fc) fix(sensor-controller): Return err when a sensor spec is invalid. Fixes #1017 (#1018)
+ * [01368464](https://github.com/argoproj/argo-events/commit/013684647aa0227afefd4434b93e06a498e41a30) feat: AMQPEventSource extra parameters. Fixes #1007 (#1009)
+ * [118856ef](https://github.com/argoproj/argo-events/commit/118856ef2e1ad2ed941a29da3d799c38659b9210) fix: Kafka sensor url. Fixes #1006 (#1008)
+
+### Contributors
+
+ * Aleksander Gondek
+ * AnaisUrlichs
+ * Davide Berdin
+ * Derek Wang
+ * Jason Froehlich
+ * Orion Delwaterman
+ * Rob K
+ * Shashwat Rawat
+ * Stéphane Este-Gracias
+ * Viktor Farcic
+ * tczhao
+
+## v1.2.3 (2021-02-18)
+
+ * [82068ece](https://github.com/argoproj/argo-events/commit/82068eceee4f7efc2f5a88126a18adc4b2ec1604) bump to v1.2.3
+ * [3bc5ecb4](https://github.com/argoproj/argo-events/commit/3bc5ecb422654d2a4c1e42ec2780ef0caab1e1a7) fix: make caCert, clientCert and clientKey optional for tls config (#1070)
+ * [6baa8fcc](https://github.com/argoproj/argo-events/commit/6baa8fcc0a3760e0359bbef09b537765a44b09e0) fix(slack trigger): properly iterate api response (#1058)
+
+### Contributors
+
+ * Aleksander Gondek
+ * Derek Wang
+
+## v1.2.2 (2021-01-27)
+
+ * [976f205f](https://github.com/argoproj/argo-events/commit/976f205f1044af9680927e47618cf32b522e15c9) bump to v1.2.2
+ * [18510a49](https://github.com/argoproj/argo-events/commit/18510a49b40d05096c8eeb0f14b102065d07257c) fix: correcting the value of Types (#1038)
+
+### Contributors
+
+ * Derek Wang
+ * Shashwat Rawat
+
+## v1.2.1 (2021-01-26)
+
+ * [e963d020](https://github.com/argoproj/argo-events/commit/e963d02042ad899aae610fc1b8e4ba08200af1f4) bump to v1.2.1
+ * [e1ff0f49](https://github.com/argoproj/argo-events/commit/e1ff0f494253f943edb2527cfd514172fc0c4fe8) fix(codegen): EventBus codegen plural issue introduced by k8s upgrade (#1037)
+ * [e0de1638](https://github.com/argoproj/argo-events/commit/e0de1638993cbf41054bcd5ddce28981abdf69de) fix: Not able to send message to slack private channel (#1036)
+ * [58e139a7](https://github.com/argoproj/argo-events/commit/58e139a7aa46c35536b633d11153d804a646e1fd) fix(sensor-controller): Return err when a sensor spec is invalid. Fixes #1017 (#1018)
+
+### Contributors
+
+ * Derek Wang
+ * Shashwat Rawat
+
+## v1.2.0 (2021-01-08)
+
+ * [6dfa3402](https://github.com/argoproj/argo-events/commit/6dfa34021674460247c9b7eed17097028b20d105) bump to v1.2.0
+ * [c29e9473](https://github.com/argoproj/argo-events/commit/c29e9473f461018bb1229f76bada95951d9249c3) fix(docs): Fix typos in eventbus.md (#1004)
+ * [4cc221f3](https://github.com/argoproj/argo-events/commit/4cc221f3849526b4dbfcee42fb8b1ce0fefe9290) fix(sensor): Disable debug mode for slack trigger. Fixes #944 (#1003)
+ * [50c79ce1](https://github.com/argoproj/argo-events/commit/50c79ce1342cb22eba6a6ebd86de350e014d889c) fix(eventbus): Use DurableQueueSub to avoid occasional duplicate durable registration error. Fixes #940 (#1002)
+ * [b518d88a](https://github.com/argoproj/argo-events/commit/b518d88abe8d325cdd66c0412df16cbf536f80a3) feat: GenericEventSource to support authentication. Closes #928 (#935)
+ * [31d3e937](https://github.com/argoproj/argo-events/commit/31d3e937be0058ee992d43b18e8db4281dff1850) feat: ability to specify version in kafka trigger. Closes #990. (#991)
+ * [97e4926c](https://github.com/argoproj/argo-events/commit/97e4926c7c627dd833d5da8e2f261a7127ea94f3) fix: Build and publish latest images automatically (#997)
+ * [6a935820](https://github.com/argoproj/argo-events/commit/6a9358204a065cf902b289d2f7cccad11c3c362c) fix: handle slack slash command payloads. Fixes #653 (#992)
+ * [6ee3328f](https://github.com/argoproj/argo-events/commit/6ee3328fafa924c0ced4aef2e3675b72b0bfb982) Fix/eventsource resource return entire event object (#981)
+ * [ab4f16db](https://github.com/argoproj/argo-events/commit/ab4f16db4476f1e73b65e58e5d31818a44986f2e) fix(eventsource/resource): return entire event object instead of just event.obj (#980)
+ * [a94e8126](https://github.com/argoproj/argo-events/commit/a94e8126686b05407ba5d8fe211dbca7e6fbde63) feat: Validate dependencies duplication. Closes #971 (#972)
+ * [39d13c97](https://github.com/argoproj/argo-events/commit/39d13c97dea695d028a641f63ec1f456c2aadbdc) fix: prevent minio bucket notification error swallowing (#964)
+ * [ff733681](https://github.com/argoproj/argo-events/commit/ff733681d014e2452905929e3793823b4939e116) feat: Add string comparators EqualTo and NotEqualTo (#954)
+ * [d1745c38](https://github.com/argoproj/argo-events/commit/d1745c3813d569c8f5280808293ddad8eadf51c8) feat(trigger): add log.intervalSeconds (#949)
+ * [9c1d7dd9](https://github.com/argoproj/argo-events/commit/9c1d7dd9cce7e657e51d81a4756232a6115742c7) feat: make github webhook creation optional (#690)
+ * [feed8019](https://github.com/argoproj/argo-events/commit/feed8019901222470e02c693ee7216f59ecb9a50) feat: SNS messages should validate the message signature. Fixes #831 (#919)
+ * [7f474efb](https://github.com/argoproj/argo-events/commit/7f474efbe933f62bfaff76f0f9f76a5538bd9cc5) feat: Adding ServiceAccountName for EventBus StatefulSet. Closes #957 (#956)
+ * [4a500b02](https://github.com/argoproj/argo-events/commit/4a500b0287632cc7c2e9b81df94c78e802b6c361) feat: Adds the ability to inject ImagePullSecrets. Closes #946 (#947)
+ * [b448413d](https://github.com/argoproj/argo-events/commit/b448413d2791f24269dcfa1eacd7683c4f88c1da) doc: Update event_source.md (#950)
+ * [ab7506d2](https://github.com/argoproj/argo-events/commit/ab7506d2b020b5c5a3aa0ca54e0776d2c4b4279c) feat(sensor): Adds the log trigger (#941)
+ * [8d3904a0](https://github.com/argoproj/argo-events/commit/8d3904a03ca867a595aeac671d81f70884944c0b) doc: Adding `channels:read` permission to Slack Trigger Documentation (#943)
+
+### Contributors
+
+ * Alex Collins
+ * Artem Yarmoliuk
+ * Carlos
+ * Christopher Cutajar
+ * Derek Wang
+ * Evan
+ * Javier Salinas
+ * Lucas Heinlen
+ * Pierre Lebrun
+ * Riya John
+ * Scott Weitzner
+
+## v1.1.0 (2020-11-16)
+
+ * [cb96dd14](https://github.com/argoproj/argo-events/commit/cb96dd1490e675c612ca28671dda93e5b4832a59) bump to v1.1.0
+
+### Contributors
+
+ * Derek Wang
+
+## v1.1.0-rc1 (2020-11-04)
+
+ * [ea738bc4](https://github.com/argoproj/argo-events/commit/ea738bc4cff848716fa77f7de5f3fefffebe33de) bump to v1.1.0-rc1
+ * [2bae60d4](https://github.com/argoproj/argo-events/commit/2bae60d4b88f8751d171cd373a26688c1d8dd1e3) fix(build): broken build script (#923)
+ * [e64eaaa1](https://github.com/argoproj/argo-events/commit/e64eaaa176835831f06a7734ce569f619c3886c2) fix(amqp): Return error when channel is closed. Fixes #920 (#925)
+ * [6efb0e0d](https://github.com/argoproj/argo-events/commit/6efb0e0d93e8151077eae87714bbf8bf8b671621) feat: Retry starting event server and quick fail. Closes #926 (#927)
+ * [c446badf](https://github.com/argoproj/argo-events/commit/c446badf8aa633d86bd75bff2b841f88d6fcd3f4) fix(generic-eventsource): Set payload when jsonbody is not true. Fixes #930 (#931)
+ * [6abeb336](https://github.com/argoproj/argo-events/commit/6abeb33669306723bb5c7e28f138f4e98b67bc6e) feat(calendar): Support catchup on calendar eventsource closes #897 (#911)
+ * [c1a7453c](https://github.com/argoproj/argo-events/commit/c1a7453c04d574912c0e073d598adf0afb5507e9) feat: extend resource eventsource field filter. Closes #913 (#915)
+ * [381b6a88](https://github.com/argoproj/argo-events/commit/381b6a884866f0da7279f689b1a78976263d1b68) feat: Make native nats eventbus max message age configurable. (#901)
+ * [63657970](https://github.com/argoproj/argo-events/commit/6365797028bb4a9cb4a6ca3a46f0c870e24fc1dd) feat: controllers runAsNonRoot. Closes #906 (#907)
+ * [14afaf19](https://github.com/argoproj/argo-events/commit/14afaf19750892c58a9a85bfdb5004a67bddacbf) fix(eventsource): allow dot in EventSource pod name (#916)
+ * [160657f1](https://github.com/argoproj/argo-events/commit/160657f18961af7006567947f82b2b06c0808921) feat: generic event source (#895)
+ * [1eee338c](https://github.com/argoproj/argo-events/commit/1eee338ce5d77cb5658212c5a67692a73f3e9a7f) feat: enable SecurityContext option for eventbus pods. Closes #903 (#904)
+ * [9d23043f](https://github.com/argoproj/argo-events/commit/9d23043faf1f3b34ced51e0ce555cc28a87cf202) feat: kafka multiple-broker support. Closes #892 (#894)
+ * [4f63d425](https://github.com/argoproj/argo-events/commit/4f63d42561e745229b0cab2ab8c377368e3b4200) Fix typo: complaint => compliant (#905)
+ * [43f5031d](https://github.com/argoproj/argo-events/commit/43f5031d674cd367fab61f2cad992dd4bbd42f15) fix(ci): Clean up multi arch build (#885)
+ * [3cf92547](https://github.com/argoproj/argo-events/commit/3cf92547cce01a26dc58e5634837936fcea901e2) feat: cloneDirectory not required for git source trigger (#882)
+ * [4bbd2d91](https://github.com/argoproj/argo-events/commit/4bbd2d916a124e08ae66927438da34e906e879d4) feat: Pass CRD metadata to generated resources (#876)
+ * [166e36db](https://github.com/argoproj/argo-events/commit/166e36db23ca82e5d28a264e63ef125457a24858) feat: Implemented Exact Once triggering for NATS event bus (#873)
+ * [78647375](https://github.com/argoproj/argo-events/commit/78647375701496a72d5100d5dba18ba05970f823) fix(docs): tutorial-introduce typo fix (#880)
+ * [f2411be5](https://github.com/argoproj/argo-events/commit/f2411be5eb7b3018c1f84700a44ba29a9a48b90b) fix: Add logic to ticker loop to ensure subscription always exists with an open connection (#861)
+ * [52bec989](https://github.com/argoproj/argo-events/commit/52bec98963ad1c152fbaba8be47970597f17e6e4) fix(eventsource): fix GCP Pub/Sub behavior (#845)
+ * [76430083](https://github.com/argoproj/argo-events/commit/76430083fbce2154d57f7ef440f106dcc709edb8) fix: Fix Azure EventsHub issue (#846)
+ * [db9a7f36](https://github.com/argoproj/argo-events/commit/db9a7f3656dad6c7b9f9e652506bf1ef18122482) feat: Connect to git with InsecureIgnoreHostKey. Closes #841 (#842)
+ * [f8c9c55c](https://github.com/argoproj/argo-events/commit/f8c9c55cc27189dfea10b83e74e0bdd58e019799) feat: Simplify Circuit and Switch with Conditions (#834)
+ * [2774a8bf](https://github.com/argoproj/argo-events/commit/2774a8bfbc65b02a897c5f80054555394422a160) feat: Simplify TLS config for EventSources and Sensors. Closes #828 (#830)
+ * [ff7f664e](https://github.com/argoproj/argo-events/commit/ff7f664e10055b9f6e3a92ea887a4095d32552b3) fix(eventbus): update log path to be together with data path (#833)
+ * [d14487d4](https://github.com/argoproj/argo-events/commit/d14487d4cf1ea36dce98a28f922e10254cd9af69) feat: Rate limit for Kafka and initial offset support and refactor config (#829)
+
+### Contributors
+
+ * Christophe Benz
+ * Derek Wang
+ * Saravanan Balasubramanian
+ * Shinichi TAMURA
+ * Trevor Foster
+ * Vaibhav
+ * Zach Aller
+ * cs_lee
+
+## v1.0.0 (2020-09-04)
+
+ * [2f5571e4](https://github.com/argoproj/argo-events/commit/2f5571e492342ac3302710c277fd35c22abbd17a) bump to v1.0.0
+ * [8961e468](https://github.com/argoproj/argo-events/commit/8961e468019971ac972ecfa681e8109ea9b7b833) fix: Add logic to ticker loop to ensure subscription always exists with an open connection (#861)
+
+### Contributors
+
+ * Derek Wang
+ * Trevor Foster
+
+## v1.0.0-rc3 (2020-08-25)
+
+ * [1b5e3859](https://github.com/argoproj/argo-events/commit/1b5e3859c61f7aed226e55403c77f2b186b71ee8) bump to v1.0.0-rc3
+ * [364f7185](https://github.com/argoproj/argo-events/commit/364f71854572cb7c518b256bbb8799ea778f178e) fix: Fix Azure EventsHub issue (#846)
+ * [f3f932af](https://github.com/argoproj/argo-events/commit/f3f932afda7f33bc1ebcf281a954d4d12eb355f1) feat: Connect to git with InsecureIgnoreHostKey. Closes #841 (#842)
+ * [f05492ec](https://github.com/argoproj/argo-events/commit/f05492ec66bf75795d49444ad41e8637c316180f) feat: Simplify Circuit and Switch with Conditions (#834)
+ * [c7b9a507](https://github.com/argoproj/argo-events/commit/c7b9a507221f615c687a07337f9dbea56a239109) feat: Simplify TLS config for EventSources and Sensors. Closes #828 (#830)
+ * [24caf93d](https://github.com/argoproj/argo-events/commit/24caf93d8c5af5a9bb8d94b0ccce167e58e3c833) feat: Rate limit for Kafka and initial offset support and refactor config (#829)
+
+### Contributors
+
+ * Derek Wang
+ * Vaibhav
+ * Zach Aller
+
+## v1.0.0-rc2 (2020-08-16)
+
+ * [44eccc33](https://github.com/argoproj/argo-events/commit/44eccc3326cf832e26cfdfd170ac6178d1ba34cd) Update manifests to v1.0.0-rc2
+ * [71b65ff6](https://github.com/argoproj/argo-events/commit/71b65ff6cebb3e122a964bbf3aa8322e3ddae8bb) fix(eventbus): update log path to be together with data path (#833)
+
+### Contributors
+
+ * Derek Wang
+
+## v1.0.0-rc1 (2020-08-10)
+
+ * [a9d44517](https://github.com/argoproj/argo-events/commit/a9d44517433ef20ad945f2c20110708aae3a7f7a) Update manifests to v1.0.0-rc1
+ * [bfbbd33c](https://github.com/argoproj/argo-events/commit/bfbbd33c589de141383cb9e0de909b396374bed2) feat: Simple Authentication for webhook related event sources. Closes #821 (#826)
+ * [fa33eb3a](https://github.com/argoproj/argo-events/commit/fa33eb3a642c1cb3034732a4cbc8c7d98ec33853) fix: commands in docs (#825)
+ * [757dbe7b](https://github.com/argoproj/argo-events/commit/757dbe7b09ead4fa00cf085c73793dae07bd01a6) feat: Simple health check endpoint for webhooks (#823)
+ * [6471850c](https://github.com/argoproj/argo-events/commit/6471850c685cb8a60d141ca11903cdd02a3303e1) fix: Use WaitGroup to start eventsources and sensors (#819)
+ * [a0cb29d9](https://github.com/argoproj/argo-events/commit/a0cb29d978db58bad78ab3a70e371f7aeeac2fac) feat: Add `NotEqualTo` comparator to data filters (#822)
+ * [74f431e3](https://github.com/argoproj/argo-events/commit/74f431e3b8495100ebe773b5c2b8e926d2e677c2) feat: Kafka Consumer Group (#817)
+ * [8a102871](https://github.com/argoproj/argo-events/commit/8a102871df674278d7a3d3d3f16f59ffdf58cf72) fix: retry when connect to eventbus in sensor (#816)
+ * [7256a570](https://github.com/argoproj/argo-events/commit/7256a57074681b7174c3c212c86b0a77dabbc0b0) fixed broken link (#814)
+ * [12ae7b2e](https://github.com/argoproj/argo-events/commit/12ae7b2e80f84faa0bafa476512473c59bbcb5be) fix(workflow-trigger): Add labels to created k8s objects. Fixes #794 (#805)
+ * [ded50d62](https://github.com/argoproj/argo-events/commit/ded50d62ac6a2d9e21c94863994d8c6e74d2d067) feat: added user metadata to eventsources (#797)
+ * [63512448](https://github.com/argoproj/argo-events/commit/63512448b260665f2c7f0ed26a14184e9d6e95f5) fix(eventbus): Bugfix for EventBus with AuthStrategyNone (#798)
+ * [099ed605](https://github.com/argoproj/argo-events/commit/099ed605dd80db9f3b607c7987d945b428068095) feat: Switch to use volumes for injected secrets and configMaps in triggers (#792)
+ * [6f26ae99](https://github.com/argoproj/argo-events/commit/6f26ae999bfd75e3582d9b558d72560f86e5664c) fix: azure eventsource (#789)
+ * [adee1b5b](https://github.com/argoproj/argo-events/commit/adee1b5b2c18c26a176352fd8cabdf451801a5d2) fix: Added/fixed tolerations for CRDs (#787)
+ * [527a45fa](https://github.com/argoproj/argo-events/commit/527a45fa661c4c6d8315a170225e724f26582d44) fix: switch slack lib and stop using deprecated APIs. Fixes #726 (#777)
+ * [c57bdb69](https://github.com/argoproj/argo-events/commit/c57bdb69926e9156c1af5f7c1f3fb340fac0c1fa) feat: Support re-using existing subscription ID for gcp pubsub (#778)
+ * [1cf98ab4](https://github.com/argoproj/argo-events/commit/1cf98ab45ad1ae189da7a89fd453252f0e8e890b) feat: pulsar event source (#774)
+ * [21f4360f](https://github.com/argoproj/argo-events/commit/21f4360fcfbb97c00fae881f9bd1a9096bac8d6c) feat: Expose metadata for sensors and fix metadata for eventsources (#773)
+ * [c911103d](https://github.com/argoproj/argo-events/commit/c911103db12855147985cff33bb67d0c1c5f9a43) fix(manifests): Fixed namespace in kustomization files (#772)
+ * [022d903c](https://github.com/argoproj/argo-events/commit/022d903c84d33f03b08f70797c2057bbf49b2280) fix: clean up installation (#771)
+ * [f04a0c28](https://github.com/argoproj/argo-events/commit/f04a0c28be80afa04fc168576628e865fd704c2b) fix: links (#770)
+ * [fa85a961](https://github.com/argoproj/argo-events/commit/fa85a961800de61c63da8859e0fd940ba2ea16f7) fix: docs (#768)
+ * [9f382f95](https://github.com/argoproj/argo-events/commit/9f382f95c780db53c90327837b7b459f3db22576) fix: add logo (#767)
+
+### Contributors
+
+ * Derek Wang
+ * Halil İbrahim Şener
+ * Herb Brewer
+ * Vaibhav
+ * Weston Platter
+ * Zach Aller
+
+## v0.17.0 (2020-07-24)
+
+ * [465f9238](https://github.com/argoproj/argo-events/commit/465f92382f383300f45f36bf53fa1999f74b8502) fix: release action (#766)
+ * [258e4461](https://github.com/argoproj/argo-events/commit/258e4461e3fda7df733576db644179717cc8e61e) fix(eventbus): Refine auto-reconnection and bugfix (#761)
+ * [6ebca1aa](https://github.com/argoproj/argo-events/commit/6ebca1aa63a2cbe50e549ff6419fbf16622c1e0b) feat(sensor)!: start & stop is required, and allow stop < start in time-filter (#737)
+ * [073995c1](https://github.com/argoproj/argo-events/commit/073995c1bcbae696e5a3f7b30d1dbbdbe14e5404) feat: remove wf dep (#758)
+ * [7fd612f7](https://github.com/argoproj/argo-events/commit/7fd612f7d291b4f804d54d9f5569d44b3d364a7f) feat: expose NodeSelector for CRDs (#756)
+ * [2363cc1f](https://github.com/argoproj/argo-events/commit/2363cc1f448ebc689b6cde967ca90b6a5461cef7) feat: Different deployment update strategy for different event sources (#755)
+ * [a8afcb70](https://github.com/argoproj/argo-events/commit/a8afcb706d48d84dc25d4e203512a610ac3327d5) fix(eventbus): NATS eventbus auto reconnect and code refactory (#749)
+ * [210aefb3](https://github.com/argoproj/argo-events/commit/210aefb32a223cc6e0ef85870b111bae45cf97e8) feat: native nats eventbus metrics and template customization (#745)
+ * [75600afe](https://github.com/argoproj/argo-events/commit/75600afeb223a89fbb330429c7b0e3236a915b5a) modify credentialsFile to be in sync with gateway (#727)
+ * [2f027680](https://github.com/argoproj/argo-events/commit/2f027680621e8d7696a917764fd12519c757025e) feat: Merge Gateway and EventSource (#735)
+ * [071aedb7](https://github.com/argoproj/argo-events/commit/071aedb71cae0b197a70651384e45c76c476afa2) Updated the sensor base dockerimage to fix vulnerabilities (#731)
+ * [4e8cf29d](https://github.com/argoproj/argo-events/commit/4e8cf29dfe17cae6e9c8b7f119fe81c3abd4aa4a) feat: Sensor-controller and Sensor re-implementation (#723)
+ * [9a62f30a](https://github.com/argoproj/argo-events/commit/9a62f30a17f966ad44376eec96f6d02241f9fee2) use json log formatter if stdout isn't tty (#699)
+ * [693e2109](https://github.com/argoproj/argo-events/commit/693e2109d33d152175cb38dc63198b9517a2032d) fix: Fix kafka gateway (#704)
+ * [500df0ac](https://github.com/argoproj/argo-events/commit/500df0ac9ffcaaf3e67dd9c5cf691e6be931c931) feat: Replace gatewayName with eventSourceName in Sensor dependencies (#687)
+ * [6aabc5a8](https://github.com/argoproj/argo-events/commit/6aabc5a8905b97a459be7d4a3d96f9f6bd2cdcf6) fix: Updated common.Status to inheritance (#708)
+ * [145fd9a9](https://github.com/argoproj/argo-events/commit/145fd9a940833c6e5b398994305d85847215d957) fix(types)!: Correct pkg and generate .proto files from them (#701)
+
+### Contributors
+
+ * Alex Collins
+ * Catalin Jora
+ * Derek Wang
+ * Kannappan Sirchabesan
+ * Shinichi TAMURA
+ * Trevor Foster
+ * Vaibhav
+ * Zach Aller
+
+## v0.16.0 (2020-06-14)
+
+ * [82cb8307](https://github.com/argoproj/argo-events/commit/82cb8307040cbae58e122c357cd2fb54398dbd60) feat: adding support for multiple events in GitLab webhook (#696)
+ * [df314d38](https://github.com/argoproj/argo-events/commit/df314d388520cca6ff59f7e7e9a1ffe709239234) use secret key referred in eventsource (#698)
+ * [08c8c962](https://github.com/argoproj/argo-events/commit/08c8c962bff3b0e242dff57b6fdcc4657ea85ea2) Fix link to Concepts in index (#688)
+ * [2a4d6cd9](https://github.com/argoproj/argo-events/commit/2a4d6cd91d81d38b21ef7360c0e0c3b242cc2d31) feat: storage grid auto registration (#694)
+ * [8b06b206](https://github.com/argoproj/argo-events/commit/8b06b206d2e4f35c055e1a1e2c4a3e87df04ed8c) fix: k8s trigger operations (#695)
+ * [dd35e6a0](https://github.com/argoproj/argo-events/commit/dd35e6a028041b5efecec6182dfb82327e5c45ab) fix: wrong name of Sensor (#685)
+ * [5e35d955](https://github.com/argoproj/argo-events/commit/5e35d9554b714c9cba2807f885cc951da3efa20f) feat: Combine nats server and streaming (#680)
+ * [5eed0c4a](https://github.com/argoproj/argo-events/commit/5eed0c4a63dd9b52938f21e6b85740780a09b09f) feat: Add afterStart filter to resource type eventsource (#682)
+ * [b0537d6c](https://github.com/argoproj/argo-events/commit/b0537d6cb1ba96e51004305fa0cac13a839bff00) fix: gateway client http requests (#684)
+ * [e4456e1f](https://github.com/argoproj/argo-events/commit/e4456e1f166f1c199f572deb1b07de4e0710a95c) feat: Introduce EventBus CRD. (#674)
+ * [6d7dfbae](https://github.com/argoproj/argo-events/commit/6d7dfbae7de15cf3da8e20d313765f90f0ec0fd4) feat: Support for using a live K8s resource in standard K8s triggers (#671)
+ * [09940439](https://github.com/argoproj/argo-events/commit/099404392d9019b7efbae4ff8e15b37acd65d1d1) fix: template metadata populated to gateway pod (#669)
+ * [bddc3981](https://github.com/argoproj/argo-events/commit/bddc39811aed353ad7fcf21e5e3aa3d554460c10) feat: Add support for polling (#611)
+ * [d570157a](https://github.com/argoproj/argo-events/commit/d570157a5c8e0022b0fea26e40bace509480f62e) feat: add affinity, tolerations in template and clusterIP in service. Closes #663. (#664)
+ * [6bc5dcfa](https://github.com/argoproj/argo-events/commit/6bc5dcfa3943b828864618abb2616cbeace60b35) feat: Unified gateway deployment image. Closes #639 (#648)
Closes #639 (#648) + +### Contributors + + * Derek Wang + * Matt Brant + * Sam Neubardt + * Umi + * Vaibhav + * jannfis + * Štěpán Vraný + +## v0.15.0 (2020-05-08) + + * [4a3d61f9](https://github.com/argoproj/argo-events/commit/4a3d61f9d667d5e95d9c61eb0c2c199f1db2c7a8) fix: gateway and sensor service name (#651) + * [09f09126](https://github.com/argoproj/argo-events/commit/09f091267798d5088f8701e9452af92652eb61aa) fix: gateway and sensor resource names (#644) + * [27b34b66](https://github.com/argoproj/argo-events/commit/27b34b6601ae76226b380c65cd5b4eca67f72431) fix: K8s trigger patch operation (#643) + * [1db7e265](https://github.com/argoproj/argo-events/commit/1db7e265b1683462181ca987a4a4011acc0c66ad) feat: add headers http trigger (#642) + * [00594f8b](https://github.com/argoproj/argo-events/commit/00594f8b244951e410f0df980a7e06d2a28ea64b) feat(trigger-patch-op): added patch operation (#640) + * [5f2734be](https://github.com/argoproj/argo-events/commit/5f2734be95e4b2748d9b88ef3244810c6725e75a) Make gateway and sensor controllers backward compatible with deprecated specs (#633) + * [fb46335c](https://github.com/argoproj/argo-events/commit/fb46335c411f929a2966b576c601d5c3dcc948ee) feat: make deployment template in gateway obj optional. Closes #598 (#602) + * [ce9ae25a](https://github.com/argoproj/argo-events/commit/ce9ae25a123cc5264c48250b914cc28ed91eaa76) feat: make sensor deployment template spec optional. Closes #599 (#613) + * [336cb65a](https://github.com/argoproj/argo-events/commit/336cb65a412db9b5b1362f04534e28ac74e829d9) workflow namespace (#608) + * [cb1358de](https://github.com/argoproj/argo-events/commit/cb1358ded2cb58b07bb81acfc970ce24392f48a7) feat: custom trigger (#620) + * [6295610f](https://github.com/argoproj/argo-events/commit/6295610f5d14fb1b879a0d8ed80e063224e02abd) fix: Revert CRD apiVersion (#625) + * [521e7cc7](https://github.com/argoproj/argo-events/commit/521e7cc711079bc2184649067c503df40b5c50c2) feat: Unify manifests structure with Argo (#607) + * [be49e979](https://github.com/argoproj/argo-events/commit/be49e979a1f5e6173f572ce498cc5ea59866faeb) feat: enabling workflow identity for GKE (#593) + * [50f11031](https://github.com/argoproj/argo-events/commit/50f11031561ddc4adc97466fa00a97e893c1107c) feat: Set nested JSON properties in sensor payload (#606) + * [9f73febc](https://github.com/argoproj/argo-events/commit/9f73febc0e80d322359322aaf4087da2d925ac00) Fix RBAC EventSource aggregation (#601) + * [8f48f695](https://github.com/argoproj/argo-events/commit/8f48f695f96958423b32b6b2181206b56628d773) fix: use configured method for HTTP trigger (#604) + * [a20f6be9](https://github.com/argoproj/argo-events/commit/a20f6be95e3ca844dc64efcaa2f414110fc5a915) Align messages with renaming (#596) + * [d8e2b00b](https://github.com/argoproj/argo-events/commit/d8e2b00b3d606341968a4c3dc112fad4c138aa2a) Update gitlab.md + * [377fd1e1](https://github.com/argoproj/argo-events/commit/377fd1e1c6fa8afaffcf4be285b53aa60bacb62c) fix: image link + +### Contributors + + * Chase Terry + * Derek Wang + * Tim Hobbs + * Tomas Valasek + * Tomáš Coufal + * Vaibhav + * Vaibhav Page + +## v0.14.0 (2020-04-12) + + * [501bbb75](https://github.com/argoproj/argo-events/commit/501bbb75dc31cfd9db999ad29c2c7a0f4329d743) Update README.md + * [f28bb9d6](https://github.com/argoproj/argo-events/commit/f28bb9d629d3c5fa35e4f4d8566a2d42ea6ccce2) fix: installation and docs + * [53308eee](https://github.com/argoproj/argo-events/commit/53308eee95bb1d9493cb07e5be7f675ddf65721c) fix: update sprig template example 
(#589) + * [0f47f25b](https://github.com/argoproj/argo-events/commit/0f47f25b7be87bf24a2a83aaeb33f9bfdb8c8f69) Update trigger-with-template.yaml + * [65eaa7a0](https://github.com/argoproj/argo-events/commit/65eaa7a077c136cf270f077f0ad2052ccae88557) refactor: make namespace optional for cm trigger source (#586) + * [a72c7e60](https://github.com/argoproj/argo-events/commit/a72c7e60d21a858f918a8f6de03963ec88caef04) feat: added openwhisk trigger (#585) + * [f9de1105](https://github.com/argoproj/argo-events/commit/f9de1105d6437d51d7e484db876f4470be4b3e20) Update calendar.yaml + * [81d8555a](https://github.com/argoproj/argo-events/commit/81d8555ac9aa9830b8c181c2a0a859ccf73a48e5) fix: tests (#582) + * [fcf4944f](https://github.com/argoproj/argo-events/commit/fcf4944fc0b4f899eb4c20bb15fb3b68c763c102) feat: tls support (#580) + * [28e23ce2](https://github.com/argoproj/argo-events/commit/28e23ce27180b0e457034548da635e198ab0ca83) fix: object updates (#579) + * [244bb211](https://github.com/argoproj/argo-events/commit/244bb2111f27c5f18f7a7eaffc82044cdb5644b7) feat: add slack trigger (#576) + * [fd62a591](https://github.com/argoproj/argo-events/commit/fd62a591dd216b715a7173493ced8c67ef776a36) fix: get node nil exception (#573) + * [5e3fa39b](https://github.com/argoproj/argo-events/commit/5e3fa39bfd4de4e331a3f032f04fba066f266f85) feat: added label and field selectors for resource gateway (#571) + * [739f1694](https://github.com/argoproj/argo-events/commit/739f16948db4c5ad8626aa31137ff4ff1bd0f435) fix: use gateway namespace for secrets if namespace is not mentioned in event source (#569) + * [b1a3e2b5](https://github.com/argoproj/argo-events/commit/b1a3e2b5237ab92504defc16be46ec0061c96e40) feat: add list of event types for resource gateway (#561) + * [57586607](https://github.com/argoproj/argo-events/commit/57586607ef95dce2d8e64971842d550309301d7e) service account is needed to create pod in 2.6.3 (#564) + * [828a62d9](https://github.com/argoproj/argo-events/commit/828a62d9d58650d528b4b51ac097e1c4f2b1252f) Update resource.yaml + * [6a98b19d](https://github.com/argoproj/argo-events/commit/6a98b19d1db15834ea0529b2f37d1626f4ad37e7) Update resource.yaml + * [c16a1c59](https://github.com/argoproj/argo-events/commit/c16a1c597ff03b58d460307f50baede16fd059d4) feat: add basic auth to HTTP trigger (#559) + * [cad2834c](https://github.com/argoproj/argo-events/commit/cad2834c0433bbc5371bbd53270f3a9f7975b655) feat: Support dataTemplate and contextTemplate for Trigger Parameters (#543) + * [3f1f2b4e](https://github.com/argoproj/argo-events/commit/3f1f2b4e30cd033777a04b16145ca2ba3b604c61) Update README.md + * [0d5f437a](https://github.com/argoproj/argo-events/commit/0d5f437a2344c08a05c62e49c98f520f7949d956) Update gcp-pubsub.yaml + +### Contributors + + * Avi Zimmerman + * Chase Terry + * Vaibhav + * Vaibhav Page + * tunoat + +## v0.13.0 (2020-03-20) + + * [7f124e04](https://github.com/argoproj/argo-events/commit/7f124e0489fcafeea1a7989788d67176b0f2c8db) Update mqtt.md + * [e6491d3a](https://github.com/argoproj/argo-events/commit/e6491d3a383cddbcc7b556ddc0dcda117aad84a4) Update gitlab.md + * [393e0ca2](https://github.com/argoproj/argo-events/commit/393e0ca2bba3574b1c13227bcdf0a8a721bf4d3c) Update gitlab.md + * [32eba6f4](https://github.com/argoproj/argo-events/commit/32eba6f42f684c8d4b8fcb12c77c334b4b4414c6) feat: Add Data Filter Comparator (#544) + * [f0b1eda9](https://github.com/argoproj/argo-events/commit/f0b1eda9ebd4ede00071e41a7b5cd5ba6234c6ba) feat: add AWS-SQS cross account queue monitoring (#547) + * 
[2c82dd9a](https://github.com/argoproj/argo-events/commit/2c82dd9a32f66c451887382489e8a133b137404a) Update argo sqs example (#540) + * [7b0491d7](https://github.com/argoproj/argo-events/commit/7b0491d7529039c9e4ba4a291f7f188a96b28f7f) feat: support json body as an option [POC] (#542) + * [c8ef5762](https://github.com/argoproj/argo-events/commit/c8ef5762237123f276b245f16178ef2949e22a15) Update resource.yaml + * [4916c8b8](https://github.com/argoproj/argo-events/commit/4916c8b80f4566c67878494cbc607adca6bc55dd) feat(gitlab-gateway): check for duplicate webhooks (#537) + * [7f3198f8](https://github.com/argoproj/argo-events/commit/7f3198f825c3e5bd66488b514a72856034d377b2) feat(github-gateway): added headers to event payload (#536) + * [bfc53e60](https://github.com/argoproj/argo-events/commit/bfc53e601521d74f792ba26e6d69cb554c1ea556) fix(nats-subscribers): fix gateway nats subscribers logic (#535) + * [60ee6183](https://github.com/argoproj/argo-events/commit/60ee6183806a8bbc4766aca3d768e9ac106c95fb) feat: support NATS trigger (#531) + * [0d41182a](https://github.com/argoproj/argo-events/commit/0d41182af3cd741d40a1b0eb3f5726d3d648aaed) feat(kafka-trigger): added kafka trigger tests (#533) + * [f808d96d](https://github.com/argoproj/argo-events/commit/f808d96d2e317808c087ffa64959c201b94b852c) Kafka trigger (#530) + * [d0d11e4e](https://github.com/argoproj/argo-events/commit/d0d11e4e85e94efe2d2c6946c1939e660b3ab45d) feat(add-argo-rollouts): argo rollout as a trigger (#529) + * [a8363437](https://github.com/argoproj/argo-events/commit/a8363437420c61afac2dc7cac4dac87b146fd2fe) chore(): update argo workflow version to include cron workflows (#528) + * [1a70502b](https://github.com/argoproj/argo-events/commit/1a70502b73dae13329b4c6fcc5377ebcccadaecd) feat: clean up openfaas trigger (#526) + * [698dd2a3](https://github.com/argoproj/argo-events/commit/698dd2a3a74bf145d6936a9051f6c3f265631b91) feat: update biobox presentation link to slideshare (#525) + * [08b7ae74](https://github.com/argoproj/argo-events/commit/08b7ae74eac77192e5df4090f8cd883618911e50) Add git to Dockerfile yum packages (#521) + * [2e781d33](https://github.com/argoproj/argo-events/commit/2e781d339da90c7e73f178c200ee08e6edd7e9ba) feat: Implemented Assume RoleARN for SQS and SNS (#519) + * [95e393ba](https://github.com/argoproj/argo-events/commit/95e393ba2bfbc9d7cb1369070ca03712c00b1c64) Update webhook.md + +### Contributors + + * Antonio Macías Ojeda + * Chase Terry + * Ian Munoz + * Julian Mazzitelli + * Matt Brant + * Saravanan Balasubramanian + * Vaibhav + +## v0.13.0-rc (2020-02-25) + + * [bd06d627](https://github.com/argoproj/argo-events/commit/bd06d627906410e629d456ca4860439af25e8df5) fix: revert to travis (#509) + * [c9fa553d](https://github.com/argoproj/argo-events/commit/c9fa553d71749dd7de897967c75b4bece556326f) fix: ci + * [10e50e9d](https://github.com/argoproj/argo-events/commit/10e50e9dfb8c5a944b397d8a31039dff7dd7aa84) fix: ci + * [ac74be44](https://github.com/argoproj/argo-events/commit/ac74be44e1a928ca8699a01408ce39c47ba59cce) fix: ci + * [fcc36034](https://github.com/argoproj/argo-events/commit/fcc36034b5bbeb0e734bd15ae18aa7b2b2d86777) feat: added circle ci (#508) + * [943895e6](https://github.com/argoproj/argo-events/commit/943895e6732521d7e45d3a46fbce693e4ea997e5) docs(): fix links + * [688e22de](https://github.com/argoproj/argo-events/commit/688e22de2a2daead3894e857bd043004b111d09d) docs(): gateway setup (#507) + * [97d4ca77](https://github.com/argoproj/argo-events/commit/97d4ca771a650fd5753c1e814dc1abeb1f39502c) refactor: 
update event and sensor type (#506) + * [3883e47f](https://github.com/argoproj/argo-events/commit/3883e47f8a36c3ab69b3037e68090dfef72a1de7) Custom triggers (#493) + * [22de2c47](https://github.com/argoproj/argo-events/commit/22de2c470487dcd3f6e2255727e522b305df2d4c) feat(): add linter (#501) + * [efa27907](https://github.com/argoproj/argo-events/commit/efa27907afcd5c977abe55c3a080bee91ee8fb36) Update kafka.yaml + * [c2df1c8a](https://github.com/argoproj/argo-events/commit/c2df1c8a1924de9252fae149a66426b26cc58257) fix(): resource gateway filter (#499) + * [59cc8019](https://github.com/argoproj/argo-events/commit/59cc8019be172c652501ee8c8f8d06704fba5c5a) fix(resource-gateway): fix test + * [d4460d76](https://github.com/argoproj/argo-events/commit/d4460d76da5b912b3b6c378b34c5bd44dd299efc) fix(resource-gateway): fix event type filter (#498) + * [41a804a2](https://github.com/argoproj/argo-events/commit/41a804a2fb04d5571dece378e3de11bfe6c32a92) patch v0.12.1 (#497) + * [736058ac](https://github.com/argoproj/argo-events/commit/736058ac5bbbc3ec66c3afddf35224667f55da5f) fix sensor examples (#496) + * [9c83f063](https://github.com/argoproj/argo-events/commit/9c83f0638e7350e3f87e30badf4d562e5e52a72c) Update minio.yaml + * [2ac99a48](https://github.com/argoproj/argo-events/commit/2ac99a4800e0c3f5897bebecf710db090405b257) valid gcp-pubsub gateway type plus validation of parameter (#492) + * [21cb5b5d](https://github.com/argoproj/argo-events/commit/21cb5b5d0780549bd2837f408683c6fa6335f2da) Update gateway responses (#486) + * [a30b3fee](https://github.com/argoproj/argo-events/commit/a30b3fee525d42752cfc9f12119a45772e8a4ba1) Fix sqs example event source (#484) + * [16f84d17](https://github.com/argoproj/argo-events/commit/16f84d171bb572588f33055da4cc2c7638174a31) Fix typo in github event-source example (#477) + * [fcd7304b](https://github.com/argoproj/argo-events/commit/fcd7304b37eb2a8c27ad6e39a230d4b5ad36562c) Add method attribute to all webhook-derived EventSource examples (#480) + * [aab78ca0](https://github.com/argoproj/argo-events/commit/aab78ca0a538856be58fdf84b3171cd089b500d6) docs(): adding name to helm command (#483) + * [702f3a1c](https://github.com/argoproj/argo-events/commit/702f3a1c9f0084ffb0ecbbddfc1c88c197a9002b) Update README.md + * [019628a9](https://github.com/argoproj/argo-events/commit/019628a9ee914de015dd2cf2974463236f775a16) fix(k8s-update-operation): fix the k8s update operation (#476) + * [a069dd83](https://github.com/argoproj/argo-events/commit/a069dd836dcab43f09b5a1ebf1a9a6d56d897e26) feat(add-generic-event-source): generic event source definition (#467) + * [a6410054](https://github.com/argoproj/argo-events/commit/a6410054367d556450c1a6327f4bd4e753057dc7) Add missing path on tutorials (#474) + +### Contributors + + * Art Begolli + * Evan Seabrook + * Gaetan SENELLE + * Giorgio Azzinnaro + * Vaibhav + * VaibhavPage + * Zach Aller + * hongkunyoo + +## v0.12 (2020-01-16) + + * [1ce40a16](https://github.com/argoproj/argo-events/commit/1ce40a16da468fa0c2f2eec7cedfbcc6ba71b097) Update index.md + * [45dd4b8b](https://github.com/argoproj/argo-events/commit/45dd4b8b845cd8341cb75790e013f1a95a5b026f) Update README.md + * [07cabc72](https://github.com/argoproj/argo-events/commit/07cabc72f322e0df2d0f9912bfc066ab0628c07c) fix: docs (#466) + * [437cb88b](https://github.com/argoproj/argo-events/commit/437cb88b1b840cd7402c048c6b5ac0e927caf3e8) feat: HTTP, AWS Lambda and OpenFaas triggers (#465) + * [4c2b912c](https://github.com/argoproj/argo-events/commit/4c2b912c96e89a9b816a266847c32d8dd6a0a47f) 
gitlabBaseURL keyword change (#461) + * [dafdab3e](https://github.com/argoproj/argo-events/commit/dafdab3ecbc42ea467aa3957c3a5b60e0c113cde) Update mqtt.yaml + * [d7424340](https://github.com/argoproj/argo-events/commit/d742434031cdcfcaceba7d1af5a8cd1d06bfcb23) Update nats.yaml + * [35fee00f](https://github.com/argoproj/argo-events/commit/35fee00f711f6679cb0d30964916afbfb620afbc) Update kafka.yaml + * [9d8b8222](https://github.com/argoproj/argo-events/commit/9d8b8222de590f5d4b08d3c101c83ef1068d388d) Update amqp.yaml + * [fc409904](https://github.com/argoproj/argo-events/commit/fc4099043498e102c73c264a4cad026904782c98) Fix gcp pubsub example (#458) + * [b4aeb6f4](https://github.com/argoproj/argo-events/commit/b4aeb6f4f9f6f44e8a41d40d346bda02470d3c38) feat: Generate OpenAPI specs as part of the build (#405) + * [d2d8db1b](https://github.com/argoproj/argo-events/commit/d2d8db1b150aec940f4989cdfd03d15b4ac309c1) fix(stream-gateways): default connection backoff (#457) + * [b56b5b29](https://github.com/argoproj/argo-events/commit/b56b5b2934b420b2a685a0064d4900bfa372202d) feat(update-operation): support update operation for triggers (#456) + * [21cd57b0](https://github.com/argoproj/argo-events/commit/21cd57b0bf51d7985f59d7d2d5020e2a8be41cbe) feat(): NSQ gateway (#455) + * [27b591c6](https://github.com/argoproj/argo-events/commit/27b591c622972b6d28b15d5e30b5b60d91d902e0) Update ROADMAP.md + * [0a6d8699](https://github.com/argoproj/argo-events/commit/0a6d8699c5da6388cff7e2230d0c51d566c8e650) Redis gateway (#454) + * [cd1600a1](https://github.com/argoproj/argo-events/commit/cd1600a1177fc04cd52f449b9b9a107cfb79645e) feat(emitter-gateway): added emitter gateway (#451) + * [c2e189ce](https://github.com/argoproj/argo-events/commit/c2e189ce5db3a86a283786bce295995979314627) Update custom-boilerplate.go.txt + * [9d4a5c52](https://github.com/argoproj/argo-events/commit/9d4a5c52dcd332a4fa462f739fc28d7e134ad6b7) feat(stripe-gateway): added stripe gateway (#450) + * [6ce129bf](https://github.com/argoproj/argo-events/commit/6ce129bf60e20f71832e3405b812157a32a323f6) Update ROADMAP.md + * [4e1f071f](https://github.com/argoproj/argo-events/commit/4e1f071f29fa7b7cd3225f31bd9d5991909d2f08) fix(azure-events-hub-gateway): added azure events hub gateway (#449) + * [7bc21d81](https://github.com/argoproj/argo-events/commit/7bc21d811ff237071a719d0c1865fe253354193e) feat: Simplify event dispatch (#448) + * [72ed7edf](https://github.com/argoproj/argo-events/commit/72ed7edfe3a6c7fa62d1d1ab2001cfcaf5f6aa0d) fix: update installation doc (#447) + * [78e79e0a](https://github.com/argoproj/argo-events/commit/78e79e0ad7462930867c449681611863046c4a82) Changes to enable passing labels and annotations to service exposing sensor through eventProtocol (#440) + * [cc8914d6](https://github.com/argoproj/argo-events/commit/cc8914d6171dae5bf39ee98e4304b8111c8a16b5) feat: support https sns protocol (#415) + * [2bdb3848](https://github.com/argoproj/argo-events/commit/2bdb3848a5a1958c8a3762fcbe04248bc9e4b9d5) fix: Update cloud events (#437) + * [b0e98792](https://github.com/argoproj/argo-events/commit/b0e98792c9e070f7a61fb1b298caf251991a4384) Update installation.yaml + * [7ea642ff](https://github.com/argoproj/argo-events/commit/7ea642ff69e6116bb68bd4659579a891c590a49d) fix(update-examples): fix streaming event sources backoff key (#445) + * [89f48af6](https://github.com/argoproj/argo-events/commit/89f48af6f34ab9c9a3d77d691be142c72b2197a1) Update ROADMAP.md + * 
[39ffb30f](https://github.com/argoproj/argo-events/commit/39ffb30fc584385ab55b0802fc87627938735aca) Update ROADMAP.md + * [10ae0e0a](https://github.com/argoproj/argo-events/commit/10ae0e0a731feddb0185fd54d484c5fef7c90f80) (fix): MODIFIED -> UPDATED (#432) + * [596fda67](https://github.com/argoproj/argo-events/commit/596fda677d9a8ce9f50cf9641afc1149f1fa1300) fix RBAC roles (#436) + * [f33d8a75](https://github.com/argoproj/argo-events/commit/f33d8a7556266b0b15e876a12fed645a30c97da2) Update kustomization.yaml (#435) + * [c00e5b2b](https://github.com/argoproj/argo-events/commit/c00e5b2b3480e15e682d3cc48b30ff4f3802ce89) Update ROADMAP.md + * [23b8e353](https://github.com/argoproj/argo-events/commit/23b8e35393901dad55258036712e208aafe4eddd) Update ROADMAP.md + * [d34f923e](https://github.com/argoproj/argo-events/commit/d34f923eb3ec8c2f3906f8839fe289f8276541c6) Update gateway.md + * [bd028217](https://github.com/argoproj/argo-events/commit/bd02821719203c2b43c8d63b7e5259d6b4a59dc7) chore(update-examples): pin versions of gateway and sensor (#434) + * [4f685b21](https://github.com/argoproj/argo-events/commit/4f685b21c25524025f36decee60cd6d7d396d548) fix: userPayload JSON should not be a string (#430) + * [c63e6045](https://github.com/argoproj/argo-events/commit/c63e60457a4d104fd5e2dbbe5980892a3e7c477c) chore(update-mkdocs): site structure (#427) + +### Contributors + + * Andrew + * Apoorv Bhawsar + * Clément + * Devin Stein + * Hunter Kelly + * Shreyam Sinha + * Vaibhav + * Zach Aller + * hongkunyoo + +## v0.12-rc (2019-12-11) + + * [8c82938f](https://github.com/argoproj/argo-events/commit/8c82938f50312f7919bfaa5e8af05c7a8d2f98e7) chore(update-examples): update minio gateway example (#426) + * [65ffc718](https://github.com/argoproj/argo-events/commit/65ffc718bfe09f34523d9af65f8355ffc1fea17e) Update developer_guide.md + * [34f86661](https://github.com/argoproj/argo-events/commit/34f86661924177f72447f57a24cf413ae14e8cc2) Update developer_guide.md + * [e9c11950](https://github.com/argoproj/argo-events/commit/e9c1195042697fc25da07de20c6c50c32c4b7bfe) InsideBoard uses Argo Events (#424) + * [b839fd05](https://github.com/argoproj/argo-events/commit/b839fd052e19aa80d538190a532ebb206ed527ad) Release 0.12 (#423) + * [55af2e49](https://github.com/argoproj/argo-events/commit/55af2e4976aad14831fb115cfe3320bd7ee60501) who uses: add biobox analytics (#422) + * [1e4dfe41](https://github.com/argoproj/argo-events/commit/1e4dfe4172e585c04aa201e8fbbd913c92870482) Update README.md (#421) + * [4d8f120a](https://github.com/argoproj/argo-events/commit/4d8f120abbb615060da68290f8180bcede6a92f0) Clarify & indicate Helm 3 incompatibility (#416) + * [dcbed09a](https://github.com/argoproj/argo-events/commit/dcbed09aceb75f18742b90650c2247d67987f35c) Update README.md (#420) + * [3a71e7c2](https://github.com/argoproj/argo-events/commit/3a71e7c286c663be97b6f02e5fe937269d353a8c) Change Viaduct.ai to Viaduct (#418) + * [08242d43](https://github.com/argoproj/argo-events/commit/08242d43c1cfc1a86d9612f7af4601c16638287e) Adding fairwinds to users (#419) + * [ef9fe6d6](https://github.com/argoproj/argo-events/commit/ef9fe6d63f329e6f6a591a37e7e5c249c7b0a2ee) Add Viaduct.ai to who uses argo events section (#417) + * [1af24f0f](https://github.com/argoproj/argo-events/commit/1af24f0f8078d0b039973dd2bf872b1fb8cd2690) Update CODE_OF_CONDUCT.md + * [3d7a8201](https://github.com/argoproj/argo-events/commit/3d7a8201c3815af25112c93c8eece9df092132be) Update OWNERS (#411) + * 
[a44a2c55](https://github.com/argoproj/argo-events/commit/a44a2c554b4f9ba41e0c06e20e661c1ba23c1332) Update README.md + * [58664327](https://github.com/argoproj/argo-events/commit/586643278328efe13e2fb70492abc46f3dc699a1) Update README.md + * [3cdc92fa](https://github.com/argoproj/argo-events/commit/3cdc92fab20f809c1279e419b6d03b4d83f1cd77) refactor(): update sensor examples (#400) + * [6bc863fe](https://github.com/argoproj/argo-events/commit/6bc863fe2dd3b740c73f27359eddfe3b404e4d65) refactor(): incremental changes (#399) + +### Contributors + + * Aaron Curtis + * Andrew Suderman + * Devin Stein + * Ed Lee + * Greg Roodt + * Julian Mazzitelli + * Saradhi Sreegiriraju + * Vaibhav + * descrepes + +## v0.11 (2019-11-08) + + * [2704f968](https://github.com/argoproj/argo-events/commit/2704f968801c35de8ac4c68b5ae0e75cf08c9859) chore(): update argo workflows to 2.4.2 (#397) + * [53dc7076](https://github.com/argoproj/argo-events/commit/53dc7076b03ab5afa022b33d840e302b3285963c) feat(): support regex for data filters (#396) + * [62569f35](https://github.com/argoproj/argo-events/commit/62569f35f1d2920bb274e0821366f739d6a106a4) fix uuid with correct return value (#390) + * [05320c6f](https://github.com/argoproj/argo-events/commit/05320c6fd26e52480141e79d12471f9a884b11ce) Update ROADMAP.md + * [b377af72](https://github.com/argoproj/argo-events/commit/b377af7258c16d2100a7e6109fe7c9a6a212a722) Update README.md (#392) + * [7d2a1f4e](https://github.com/argoproj/argo-events/commit/7d2a1f4e5aadb322190d951ae2bee9354d1f219e) Update README.md + * [3ee56185](https://github.com/argoproj/argo-events/commit/3ee5618509e7e33a5678c541929f26959f9a2074) Update README.md + * [ea04180e](https://github.com/argoproj/argo-events/commit/ea04180ed6509d55d7c292898e13b551fb250e94) Aggregate gateway and sensor permissions to admin/edit/view roles (#382) + * [07ab6a3b](https://github.com/argoproj/argo-events/commit/07ab6a3bca50eeafc6a4ed4d06aca988715fc918) Reworking grammar in index.md and sensor.md (#386) + * [afbdba60](https://github.com/argoproj/argo-events/commit/afbdba604ef16c93f64612fbd147380f78cb9a1a) Fix Event Sensor panic when a nonexistent gateway is selected in a dependency (#383) (#384) + * [f9fa18b8](https://github.com/argoproj/argo-events/commit/f9fa18b8a17877324aba0f25fd027f5d26bc51cc) added namespace to create obj (#385) + * [70cca52c](https://github.com/argoproj/argo-events/commit/70cca52c76443936607a55e596fbc6e83b82cc35) Add rateLimit option to amqp gateway (#353) (#378) + * [13cb29de](https://github.com/argoproj/argo-events/commit/13cb29de8da834c5da7fac8a78b91bf4e8dbba6b) 363 update the SQS gateway documentation (#371) + * [b7e13bfc](https://github.com/argoproj/argo-events/commit/b7e13bfc23f14c785f320f5a29fc70aac6c817bb) Update README.md + * [9328532c](https://github.com/argoproj/argo-events/commit/9328532c19f0ef9812f7e5779f2736137bed0cb2) Update README.md + * [98771772](https://github.com/argoproj/argo-events/commit/987717729ab36b09ea05dccd9c37b78bdd05c14b) Update deps (#370) + +### Contributors + + * Don Mayo + * Jeremy Solbrig + * Makunouchi + * Ondrej Smola + * SHRJA + * Saradhi Sreegiriraju + * Vaibhav + * XiangChen666 + * catalinvr + +## v0.10 (2019-10-06) + + * [3b9f64e2](https://github.com/argoproj/argo-events/commit/3b9f64e2f6b8373f27d37d481c72c5a6087cd118) chore(): release v0.10 (#369) + * [aff4dc14](https://github.com/argoproj/argo-events/commit/aff4dc14f06b8ff49cfd6f278fd01686b9cb1125) Omit duplicate CRD for workflows (#368) + * 
[9605b8b6](https://github.com/argoproj/argo-events/commit/9605b8b6eeefbc7143c756bcd4daf945d7593e8a) Support adding Github hooks without requiring a Github webhook id to be hardcoded (#352) + * [01693817](https://github.com/argoproj/argo-events/commit/01693817f11cf51483ab0f033e89dce2d3f44451) Add kustomization.yaml (#349) + * [6dabd55c](https://github.com/argoproj/argo-events/commit/6dabd55c48a1a5f955e9de2efa7ab66f2ae5038f) fix circuit bug (#327) + * [b33f3d8f](https://github.com/argoproj/argo-events/commit/b33f3d8f8d47c503456355ba7ad9dce0fcb301a9) Fix missing resource group (#337) + * [01c2bff9](https://github.com/argoproj/argo-events/commit/01c2bff9264ab4feea6bdaea35eef3843f1e4e62) Allow watching of resources in all namespaces at once (#334) + * [e573cd4b](https://github.com/argoproj/argo-events/commit/e573cd4b197b12d3f79ff5d4f76d54320201da06) Apply resource parameters before defaulting namespace (#331) + * [649847c9](https://github.com/argoproj/argo-events/commit/649847c964653051eef11e6ee2bbdd1d5c22a0f3) Pub/Sub: multi-project support (#330) + * [f02d2cd7](https://github.com/argoproj/argo-events/commit/f02d2cd7bdac456bb8ba6b3de4a97831c403c7af) Feature/support slack interaction actions (#324) + * [6b628ac2](https://github.com/argoproj/argo-events/commit/6b628ac2e9eaf40b5affd652b982236dc6815713) Gcp Pubsub Gateway Quality of life Fixes (#326) + * [2fffc3cf](https://github.com/argoproj/argo-events/commit/2fffc3cf476ff54e0623d53f45a0e7eac75e8803) Rename event-source of example gcp-pubsub to match gcp-pubsub gateway (#325) + * [6a5a6222](https://github.com/argoproj/argo-events/commit/6a5a62227da9ab9c8a8e7ea3dea8653d5b3e707e) Support fetching/checkouts of non-branch/-tag git refs (#322) + * [30aedd16](https://github.com/argoproj/argo-events/commit/30aedd1619cbb8658addd293afbfb795bab9e675) Update index.md (#323) + * [4c0d5ed1](https://github.com/argoproj/argo-events/commit/4c0d5ed188aa2a1f4aac2cafcd8efa6a547dcb66) fix timezone bug (#321) + * [0d7e986b](https://github.com/argoproj/argo-events/commit/0d7e986bcb08ea065fc7f255ec132ce643941ebf) added ability to send events to sensors in a different namespace (#317) + * [1d51b87b](https://github.com/argoproj/argo-events/commit/1d51b87bd54682f26100ab622e04bc30cdb31d97) Support different trigger parameter operations (#315) (#319) + * [f54d6513](https://github.com/argoproj/argo-events/commit/f54d6513ec38c6fd74e57c7f9cf4d9e831c07daa) Added ability to refer the eventSource in a different namespace. 
(#311) + * [95ecb9ba](https://github.com/argoproj/argo-events/commit/95ecb9bae971d9e3bc4777d7fbfe460226b64cd3) Fix typo (#312) + * [4955664c](https://github.com/argoproj/argo-events/commit/4955664c228281afa08295693a60cc827874af8f) Fix container name of example gcp-pubsub-gateway (#316) + +### Contributors + + * Aditya Sundaramurthy + * Daniel Duvall + * Don Mayo + * Florian Peter + * Jt Miclat + * Justin Taylor-Barrick + * Marek Čermák + * Michael Goodness + * Terese Haimberger + * Theodore Messinezis + * Vaibhav + * elyzov-plesk + +## v0.9.3 (2019-06-28) + + * [9ecdd384](https://github.com/argoproj/argo-events/commit/9ecdd384ffbcf5fe1ff70572e56c70720105c819) Enable creating AWS sessions without providing explicit credentials f… (#300) + * [c61c8240](https://github.com/argoproj/argo-events/commit/c61c824078efb0f25af1c3b31989966057f4be2b) Wildcards in sensor dependencies (#296) + * [4a769cb5](https://github.com/argoproj/argo-events/commit/4a769cb50aeed278f9cf3620631dff4b263947ee) Added ability to use JSON objects in the userPayload field for calendar events (#295) + * [f63ec3d6](https://github.com/argoproj/argo-events/commit/f63ec3d6ad6e5f76801d22d845cab0e5eef91ec9) Make example consistent with documentation (#290) + * [6046e158](https://github.com/argoproj/argo-events/commit/6046e158aab84f3e611027cb8a3f937222c9b6b4) Make the instructions more explicit (#288) + * [75a296de](https://github.com/argoproj/argo-events/commit/75a296de86d189d2996527a85513e52e16c92148) Fix git trigger example link in documentation (#286) + * [c7d8b1e1](https://github.com/argoproj/argo-events/commit/c7d8b1e1aacf8de8a27615ca9d724792d325e8cd) Set timeout and and connection limit (#284) + * [81a871c6](https://github.com/argoproj/argo-events/commit/81a871c6b2b4cbd4b71a699616a4819571000440) fix logo (#282) + * [2a4a8ad4](https://github.com/argoproj/argo-events/commit/2a4a8ad4e1e91cbd42d31ada00b58fdecb3c9f2d) Fix slack sensor example (#280) + * [4884b370](https://github.com/argoproj/argo-events/commit/4884b370313ced52a3fffeb6371f976efbabd465) Fix docs (#279) + +### Contributors + + * Dat Truong + * Edwin Jacques + * Hemil Desai + * Matthias Popp + * Nathan Essex + * Vaibhav + * elyzov-plesk + +## v0.9.2 (2019-04-25) + + +### Contributors + + +## v0.9.1 (2019-04-25) + + * [b68b94ab](https://github.com/argoproj/argo-events/commit/b68b94abb38b8318bd97c6a0a86873e2accad07f) Resource version validation (#268) + * [68bbdd47](https://github.com/argoproj/argo-events/commit/68bbdd47b953258903092830d0a7dbe682c27ca9) fix gcp-pubsub-gateway (#270) + * [9122d507](https://github.com/argoproj/argo-events/commit/9122d50783511e7d08e9020f27249899fa015fe2) Support any K8s spec as trigger source (#259) + * [2a5f965f](https://github.com/argoproj/argo-events/commit/2a5f965ff704bab535e6a753d999232b9b00b9e5) add trigger backoff and execution policy (#256) + * [4099873a](https://github.com/argoproj/argo-events/commit/4099873a84b2b45aa42d7ef57f0ce6e82178e8e6) fix validating payloads from github (#263) + * [74d0443e](https://github.com/argoproj/argo-events/commit/74d0443e23538b278d9a4603d26534c978658fba) lower the wait timeout to a valid value (#260) + * [3f4e7859](https://github.com/argoproj/argo-events/commit/3f4e78599c4b85ab66bef1d57588d2cdba4de4b5) fix missing : (#257) + * [d3732ff3](https://github.com/argoproj/argo-events/commit/d3732ff3d6df414e06b1d9edee24c3cde128e994) Improve logging (#253) + * [0019098f](https://github.com/argoproj/argo-events/commit/0019098f333aa964c4380b6dd5a69afb12a1dcdf) fix(): git trigger auth - #255 + * 
[963abc2e](https://github.com/argoproj/argo-events/commit/963abc2eaa7acc679cda9c378ef51c2d596f7e94) E2e tests (#249) + * [806d3e52](https://github.com/argoproj/argo-events/commit/806d3e52f83fa99d9eb800479adc12d5b19e5a7e) docs(): release v0.9 update (#252) + * [5084e6d8](https://github.com/argoproj/argo-events/commit/5084e6d805c31042bb109156f4d0c8d7214194c5) Refactor webhook route config (#243) + * [cb6aa090](https://github.com/argoproj/argo-events/commit/cb6aa0901a25a2d1a977e1a1c31d4fa830fe320a) Update docs (#225) + +### Contributors + + * Daisuke Taniwaki + * Miyamae Yuuya + * Nick Stott + * Seiya Muramatsu + * Vaibhav + +## v0.8.3 (2019-03-18) + + * [a913dafb](https://github.com/argoproj/argo-events/commit/a913dafbf000eb05401ef2c847b29152af82977f) Support applying parameters for complete trigger spec (#230) + * [d8785c09](https://github.com/argoproj/argo-events/commit/d8785c096ba3dffa69da3c9c20a2a052ec50765c) github-gateway: Add delivery headers (#236) + * [af1f8557](https://github.com/argoproj/argo-events/commit/af1f8557caaed0f081bac3a9172c51c793f65ffb) Add tests (#220) + * [9efd6a1f](https://github.com/argoproj/argo-events/commit/9efd6a1f7f9bd6ec01afa938b1fdefcaf5ee04ff) manage sensor status nodes (#227) + +### Contributors + + * Johannes 'fish' Ziemke + * Vaibhav + +## v0.8.2 (2019-03-13) + + * [7554db9c](https://github.com/argoproj/argo-events/commit/7554db9cbd58e729d5357ea91a41cd9fdfcf703f) Fix goroutine error in test (#229) + * [5fb8ba13](https://github.com/argoproj/argo-events/commit/5fb8ba1301f0dc0562b998d9b1a0c5cc0b6d12b2) Fix operator tests (#228) + * [ec515595](https://github.com/argoproj/argo-events/commit/ec515595de5bba8dd162c844b3528d1ac68daa90) Handle empty namespace on trigger resources (#224) + * [2921f78c](https://github.com/argoproj/argo-events/commit/2921f78c5448c4146a6505755229714bf42473d1) gateway/github: Enterprise support and some fixes (#221) + * [11cd17d5](https://github.com/argoproj/argo-events/commit/11cd17d56ba39d1a460b187b3fb74ef76bfa5884) Fix codegen (#217) + * [cb8ebd80](https://github.com/argoproj/argo-events/commit/cb8ebd808beae5c6a760b6a62c08710a4c6c1c7b) Child resource recreation (#208) + * [e362cbc0](https://github.com/argoproj/argo-events/commit/e362cbc007bed58aa6a392892c214056ec5d9fcc) Cleaning up go report card; linting; adding comments (#219) + +### Contributors + + * Daisuke Taniwaki + * Johannes 'fish' Ziemke + * Matthew Magaldi + +## v0.8.1 (2019-03-10) + + * [45bf2f04](https://github.com/argoproj/argo-events/commit/45bf2f04063e4b7f2271d6aaebaaeae942deb8a3) fix(): add url to webhook (#216) + * [eb66f3dc](https://github.com/argoproj/argo-events/commit/eb66f3dc2510809c6972c31f8bbd8c2a7209c52a) Adding godoc and license badges to README (#218) + * [2660aab4](https://github.com/argoproj/argo-events/commit/2660aab4acbac8bce56ef4ee12e5028d8a3df3c1) fix(): remove user path (#215) + * [3d7a94fd](https://github.com/argoproj/argo-events/commit/3d7a94fd9875007eb5b23da68c55669dc922ae00) Improve file gateway (#213) + * [a6811807](https://github.com/argoproj/argo-events/commit/a68118076fdf3e54ab63864661b25f4048327e3f) fix(): trim white spaces in creds stored in k8s secret (#211) + * [ce507d45](https://github.com/argoproj/argo-events/commit/ce507d45e0263315233b07de3b244fb345b34f72) fix(): fix the git trigger branch or tag switch bug (#205) + * [d1c81209](https://github.com/argoproj/argo-events/commit/d1c812099e086de37dd77160228dc360c27d4dff) feature(): making webhook secure (#206) + * 
[9204aeae](https://github.com/argoproj/argo-events/commit/9204aeae1964fdf290660ea533f4d7f9d179de8f) adding coveralls to travis ci build (#200) + * [cb5bb932](https://github.com/argoproj/argo-events/commit/cb5bb9325639885fe2db88ef4104c6b6f7edf59f) fix(): tests (#198) + * [224e3339](https://github.com/argoproj/argo-events/commit/224e33391ceadcf90971cc803ad8c9b370a6bdfa) adding .travis.yml build file (#197) + * [b3d37674](https://github.com/argoproj/argo-events/commit/b3d376741b778b05261359e5dd40ea9eda593821) Trello gateway (#193) + * [de047acb](https://github.com/argoproj/argo-events/commit/de047acb255f25239ead983bb96a21066a54a1e2) Aws sqs gateway (#192) + * [ff3a4fba](https://github.com/argoproj/argo-events/commit/ff3a4fba4c0daa9433d52ee98b506162ac7e515c) Update trigger guide (#196) + * [2b52d006](https://github.com/argoproj/argo-events/commit/2b52d006cd943fa99250e84eaa8f58fbefee66bb) fix webhook sensor example (#195) + * [7c89f755](https://github.com/argoproj/argo-events/commit/7c89f755b58f115eda79bf5f848bd867cfdb1dcf) Support regexp path (#191) + * [f2cf46f6](https://github.com/argoproj/argo-events/commit/f2cf46f6157ae37912fb1ed897771cef914644c3) fixing asset links to point to master branch (#190) + * [dd1a1a43](https://github.com/argoproj/argo-events/commit/dd1a1a4315957cd1bc2a16b0665054862bcc384c) adding code of conduct; moving assets into separate docs directory (#189) + * [a1fa7002](https://github.com/argoproj/argo-events/commit/a1fa700250440d56c7cdf557bbae56af7cce2392) feature(): support event type in resource gateway (#187) + * [92968e2d](https://github.com/argoproj/argo-events/commit/92968e2d987800eb053d7bd0249f02bfee948a90) Implement HDFS gateway (#180) + * [3eee3c96](https://github.com/argoproj/argo-events/commit/3eee3c96a09ae85fcd1e4ad7f2dd0624ec354f41) Fix resource gateway (#185) + +### Contributors + + * Daisuke Taniwaki + * Matthew Magaldi + * Vaibhav + * jaimejorge + +## v0.8 (2019-02-27) + + * [d5c4871c](https://github.com/argoproj/argo-events/commit/d5c4871c896be955a3e7a70f781acc295be62461) Fix typo in resource-gateway doc (#173) + * [212979c1](https://github.com/argoproj/argo-events/commit/212979c16d20f39e597f5a62def8cfd060f34cb6) modify instalation link (#182) + * [506cc70a](https://github.com/argoproj/argo-events/commit/506cc70a3b0c851a37e357e3501d06251261aa33) Allow to overwrite variables in Makefile (#181) + * [72fc6f2f](https://github.com/argoproj/argo-events/commit/72fc6f2ffe71e0f8a7de0d5dedf4b836a79815f8) Slack gateway (#177) + * [e0242e7b](https://github.com/argoproj/argo-events/commit/e0242e7b00f7edc7e0cff314abd3a728ddf3c62b) Update README.md (#179) + * [5023ee6a](https://github.com/argoproj/argo-events/commit/5023ee6a08e022797c782c7d1a3f72019c5e9f04) fix typo, 'which' (#178) + * [2e64f672](https://github.com/argoproj/argo-events/commit/2e64f672ccb209db99001a233146a084329c92d8) Implementing GCP PubSub Gateway (#176) + * [1609f499](https://github.com/argoproj/argo-events/commit/1609f4992e8ca77cc4259842fc906121928b2a29) Enhance github and gitlab gateway (#172) + * [ac66a69b](https://github.com/argoproj/argo-events/commit/ac66a69bb09847b5369b4aeb74738ec43d13b6bf) Git as trigger source (#170) + * [c98e35c7](https://github.com/argoproj/argo-events/commit/c98e35c78ce163fd6302e8af4041314878052e23) AWS SNS Gateway (#169) + * [fce79f1f](https://github.com/argoproj/argo-events/commit/fce79f1f28cfb97dc332604e54a4620a9cad40f7) Added support for backoff option when making connections in stream gateways (#168) + * 
[b09c4055](https://github.com/argoproj/argo-events/commit/b09c40555420b7ad9c0692202d710c207480202b) Enhance data filter (#167) + * [fb3da761](https://github.com/argoproj/argo-events/commit/fb3da761f8626a644212276846eb8d5afb7e2591) typo (#166) + * [74272fb9](https://github.com/argoproj/argo-events/commit/74272fb919eaf237e82962a2e9868a7da469315b) Ability to add timezone and extra user payload to calendar gateway (#164) + * [89897386](https://github.com/argoproj/argo-events/commit/89897386b74f62c61baf0d5dc320dab6ead67428) Boolean circuit for event dependencies (#162) + * [6d3d3aea](https://github.com/argoproj/argo-events/commit/6d3d3aeaa0b4f957faed3a8f14a2641c796e3ae2) Update api common (#161) + +### Contributors + + * Daisuke Taniwaki + * Hideto Inamura + * Jason Whitlark + * Matthew Magaldi + * Nick Stott + * Saradhi Sreegiriraju + * Vaibhav + * etheleon + +## v0.7 (2019-01-29) + + * [349ff862](https://github.com/argoproj/argo-events/commit/349ff8627fde36fce3161b280b6cb043dbffe01c) Docs updates for v0.7 (#159) + * [3461edae](https://github.com/argoproj/argo-events/commit/3461edaef3ae4536fb8bb6836baa0b4d3840aa04) Nats standard and streaming support (#156) + * [2605141e](https://github.com/argoproj/argo-events/commit/2605141ea204eafb1b235c8873ec58308be70859) Correct ConfigMap for Calendar gateway (#153) + * [98de8452](https://github.com/argoproj/argo-events/commit/98de8452e4b19bc2953723640c9f852f3c14416a) Refactor gateways (#148) + * [39a93710](https://github.com/argoproj/argo-events/commit/39a9371034c09cb54aa477bc13add08bbdde5c58) Refactor storage grid gateway (#141) + * [1565ccaf](https://github.com/argoproj/argo-events/commit/1565ccafca4dd10848ebb03255cf0ba8502f74f2) Update quickstart.md (#140) + * [285ed12c](https://github.com/argoproj/argo-events/commit/285ed12cabdcaea5703ffbbebb4adbcd3a361f70) Added missing namespace reference in quickstart guide (#139) + * [c3e96288](https://github.com/argoproj/argo-events/commit/c3e962887f7f11762d33e26a3d819738936d9c27) Fix examples (#138) + * [c6f66368](https://github.com/argoproj/argo-events/commit/c6f66368f0a151555eb3a42c0000eeea0f0813a1) Fixing examples (#135) + * [9cc42b6e](https://github.com/argoproj/argo-events/commit/9cc42b6e1beb961ef21ffc3092abf8657eab9cd2) Added deploy spec for sensor. Modified gateway spec (#134) + +### Contributors + + * Daisuke Taniwaki + * Fawad Halim + * Greg Roodt + * Vaibhav + +## v0.6 (2018-11-26) + + * [3d4b06b5](https://github.com/argoproj/argo-events/commit/3d4b06b5024727d54ac89e5657e5035b07ce3223) gateway deployment template inherits annotation of gateway spec (#129) + * [bcbbbf4d](https://github.com/argoproj/argo-events/commit/bcbbbf4ddce7a6940d30b51e7a41ee13e0ddc692) Branch guides -> master (#131) + * [8836d128](https://github.com/argoproj/argo-events/commit/8836d1280ffa704f1058b0670209796dac77fce0) Updated docs and changelog for 0.6 release (#124) + * [cc4e987e](https://github.com/argoproj/argo-events/commit/cc4e987e143c40371e2b966b0a7930f848b3753b) Added gateway for gitlab project hooks (#122) + * [201952e9](https://github.com/argoproj/argo-events/commit/201952e974f1e6bec079e2c0564fdb78f0b191bf) cleaning up gateways (#119) + * [32a1eace](https://github.com/argoproj/argo-events/commit/32a1eace3cd14de9ab3bef488b5eb33078b8287f) Fix links of github tree references (#118) + * [51460403](https://github.com/argoproj/argo-events/commit/51460403a538a5da865b5f8105c912dbe27a4834) WIP: File Watcher gateway as core gateway, notification filters to storage grid gateway, k8 configmap as trigger artifact location. 
(#117) + * [219c48c8](https://github.com/argoproj/argo-events/commit/219c48c89e2392a0d35c6d5c9321549b301dac3d) Update sensor deployment (#116) + * [e60d0c12](https://github.com/argoproj/argo-events/commit/e60d0c12c792176a143833ec02ade4a71540f27c) Updated user guide and storage grid custom gateway (#115) + * [4838fc72](https://github.com/argoproj/argo-events/commit/4838fc72495256e622af4d8c6451c8c7b0a4e75d) Fix the example CRD (#111) + * [2f07e14c](https://github.com/argoproj/argo-events/commit/2f07e14cdbc65b33a3a0da6c6129fc3da72c7122) Added support to pass non JSON payload to trigger (#113) + * [b05d22a7](https://github.com/argoproj/argo-events/commit/b05d22a7e8893251d9819d18730215f9669e4b47) Fix the example links (#110) + * [9acb5560](https://github.com/argoproj/argo-events/commit/9acb55602dde2ed3e9b92888f3cd136c4717aaae) Added support for passing complete event payload from signal to trigger. Updated docs (#94) + * [3272f1f0](https://github.com/argoproj/argo-events/commit/3272f1f0b8dc5fbe62d80da0aa41454b48f4dbb4) re-architecting around gateways and sensors (#92) + * [856ecb91](https://github.com/argoproj/argo-events/commit/856ecb915d079b1277ebf5b25facdbcd1032393e) don't escalate when escalation policy is not configured (#89) + * [2d5f00a5](https://github.com/argoproj/argo-events/commit/2d5f00a5e8051535047443a727ac2d4e0ab42538) updating contributing and quickstart docs (#88) + * [42ee0abb](https://github.com/argoproj/argo-events/commit/42ee0abbc0af8cf1eb3c888326c7055b22ced1a7) converted int to string. fixes bug (#87) + * [38548a78](https://github.com/argoproj/argo-events/commit/38548a7876c84877bc31c0a4492ea3daa016fa85) Execute filterData only if data filter is specified. (#83) + * [ab9b1b60](https://github.com/argoproj/argo-events/commit/ab9b1b60822e8cac95e91410076931801f0bbedf) passing entire s3 notification payload as event data (#78) + * [29f06e40](https://github.com/argoproj/argo-events/commit/29f06e406f2722de03fbd0b4562d707c1051d889) fixing resource service name + * [e9ca7445](https://github.com/argoproj/argo-events/commit/e9ca7445cf70944b12a214022757f2ddff21c1da) switching to logrus logger (#76) + * [a1138edf](https://github.com/argoproj/argo-events/commit/a1138edf1b8227b604b8aa59543590f471644356) Serve and read from local server for url test. (#77) + * [a55c6249](https://github.com/argoproj/argo-events/commit/a55c62493f48b569206f6b04871e94e148be9909) add runtime agnostic point to readme + * [0b73937c](https://github.com/argoproj/argo-events/commit/0b73937c2983689910c547b28085c98331e2e8a2) Use Kubernetes package version 1.10. (#74) + +### Contributors + + * Ace Eldeib + * Daisuke Taniwaki + * EdanSneh + * Matt Magaldi + * Matthew Magaldi + * Shri Javadekar + * Tayfun Wiechert + * Vaibhav + * Vladimir Pouzanov + +## v0.5-beta1 (2018-07-26) + + * [f0196b16](https://github.com/argoproj/argo-events/commit/f0196b16e534bab26bc5c20b38d50f5b5bffdb35) better sensor validation (#71) + * [0cb7b7fc](https://github.com/argoproj/argo-events/commit/0cb7b7fc8634ea93c97f1b933bdc867524b21e0a) removing port from webhook and revising signal impl (#68) + * [ddee2336](https://github.com/argoproj/argo-events/commit/ddee2336decebc957c1b2e633c85100dd276a487) Documentation updates. 
(#69) + * [b45849c8](https://github.com/argoproj/argo-events/commit/b45849c86f460c35c9709d2398a0eb0ec1591074) Add verifycert as an argument to URL trigger source (#67) + * [38d6adc9](https://github.com/argoproj/argo-events/commit/38d6adc939e6ddbbae99eafc3f65da83040185c3) Support URL trigger sources (#66) + * [f493709f](https://github.com/argoproj/argo-events/commit/f493709fe2427ee0bd35159d2137bbc004ceacfb) Support file trigger sources. (#65) + * [711ce7ac](https://github.com/argoproj/argo-events/commit/711ce7ac6de91bfa7669ae94197cfe1f74d3af44) add-resource-params, fix filter, testing on minikube, updated quickstart (#61) + * [bfea608f](https://github.com/argoproj/argo-events/commit/bfea608fd7a021034d046688ba0576074414690a) fix-webhook-unit-test (#58) + * [848e4689](https://github.com/argoproj/argo-events/commit/848e4689cdba90937492081beca268e899a80137) adding check for signal service, improving docs for signal services (#57) + * [51242826](https://github.com/argoproj/argo-events/commit/512428260c29595f82c62aa0ea450427ab8e4bf3) adding signal filters (#54) + * [b91b0132](https://github.com/argoproj/argo-events/commit/b91b0132205d54ca202cb8c404e84d7eb156a9ff) Support inline artifacts. (#52) + * [3e00ae1b](https://github.com/argoproj/argo-events/commit/3e00ae1b09e7af4c6d538957883e49e45b6371b3) implementing signals as microservice deployments (#49) + * [9ebb7fb1](https://github.com/argoproj/argo-events/commit/9ebb7fb1c2e0440bb879b9a4c98815b3f8341dab) Upgrade to code-gen v1.10. (#48) + * [cd48dc25](https://github.com/argoproj/argo-events/commit/cd48dc25d6cc20dc63cdab3d0af6a42883602851) Use argoproj/argo-events-ci-builder:1.0 for argo-events builds. (#51) + * [aea0cacc](https://github.com/argoproj/argo-events/commit/aea0cacc92e2349bbbee4c0702cf447478c3ccfc) fixing code-gen and openapi-gen failures (#47) + * [309f61fb](https://github.com/argoproj/argo-events/commit/309f61fb71a98ed1c9ca103482337d4bb60cc5ca) adding defined rbac roles for argo-events-sa (#43) + * [8fde9ce5](https://github.com/argoproj/argo-events/commit/8fde9ce548a64c7c7fb5ebd671b92ea15c04522c) Add workflow for CI. (#42) + * [b20db08c](https://github.com/argoproj/argo-events/commit/b20db08cbf5ba155137f8632409c813129d607d5) creating extensible signal plugin interface (#36) + * [a781888c](https://github.com/argoproj/argo-events/commit/a781888cc4388947b53582683a5bfe3ba6fad208) Update docs/quickstart.md with more thorough instructions. (#39) + * [0f071215](https://github.com/argoproj/argo-events/commit/0f07121509c2025e13e0cbd49783eccb929c542c) Make executors imagePullPolicy configurable. (#33) + +### Contributors + + * Matthew Magaldi + * Shri Javadekar + +## v0.5-alpha1 (2018-06-20) + + +### Contributors + + diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4adbf9d3c6..e445d367af 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,60 +1 @@ -# Contributing - - -## Report a Bug -Open an issue. Please include descriptions of the following: -- Observations -- Expectations -- Steps to reproduce - -## Contribute a Bug Fix -- Report the bug first -- Create a pull request for the fix - -## Suggest a New Feature -- Create a new issue to start a discussion around new topic. Label the issue as `new-feature` - -## Setup your DEV environment -Argo Events is native to Kubernetes so you'll need a running Kubernetes cluster. This guide includes steps for `Minikube` for local development, but if you have another cluster you can ignore the Minikube specific step 3. 
- -### Requirements -- Golang 1.13 -- Docker - -### Installation & Setup - -#### 1. Get the project -``` -go get github.com/argoproj/argo-events -cd $GOPATH/src/github.com/argoproj/argo-events -``` - -#### 2. Vendor dependencies -``` -GO111MODULE=on go get github.com/cloudevents/sdk-go -``` - -#### 3. Start Minikube and point Docker Client to Minikube's Docker Daemon -``` -minikube start -eval $(minikube docker-env) -``` - -#### 5. Build the project -``` -make build -``` - -Follow [README](README.md#install) to install components. - -## Changing Types -If you're making a change to the `pkg/apis` package, please ensure you re-run: - - -``` -make codegen -``` - -### Test Policy - -Changes without either unit or e2e tests are unlikely to be accepted. +See [docs/CONTRIBUTING.md](docs/CONTRIBUTING.md). diff --git a/DEPENDENCIES.md b/DEPENDENCIES.md index 300a1b97a1..3d4b400803 100644 --- a/DEPENDENCIES.md +++ b/DEPENDENCIES.md @@ -12,7 +12,7 @@ | minio/minio-go | Apache-2.0 | | nats-io/go-nats | Apache-2.0 | | robfig/cron | MIT | -| streadway/amqp | BSD-2-Clause | +| rabbitmq/amqp091-go | BSD-2-Clause | | rs/zerolog | MIT | | mitchellh/hashstructure | MIT | | nats-io/gnatsd | Apache-2.0 | diff --git a/Dockerfile b/Dockerfile index e800a57173..3f645acc41 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,13 +2,13 @@ ARG ARCH=$TARGETARCH #################################################################################################### # base #################################################################################################### -FROM alpine:3.12.3 as base +FROM alpine:3.16.2 as base ARG ARCH RUN apk update && apk upgrade && \ apk add ca-certificates && \ apk --no-cache add tzdata -ENV ARGO_VERSION=v3.1.1 +ENV ARGO_VERSION=v3.5.8 RUN wget -q https://github.com/argoproj/argo-workflows/releases/download/${ARGO_VERSION}/argo-linux-${ARCH}.gz RUN gunzip -f argo-linux-${ARCH}.gz diff --git a/Makefile b/Makefile index 7cb052c7b9..160f3aa766 100644 --- a/Makefile +++ b/Makefile @@ -15,9 +15,9 @@ EXECUTABLES = curl docker gzip go # docker image publishing options DOCKER_PUSH?=false -IMAGE_NAMESPACE?=quay.io/argoproj -VERSION?=v1.4.0 -BASE_VERSION:=v1.4.0 +IMAGE_NAMESPACE?=quay.io/codefresh +VERSION?=v1.9.2-cap-CR-24607 +BASE_VERSION:=v1.9.2-cap-CR-24607 override LDFLAGS += \ -X ${PACKAGE}.version=${VERSION} \ @@ -37,6 +37,8 @@ VERSION=$(GIT_TAG) override LDFLAGS += -X ${PACKAGE}.gitTag=${GIT_TAG} endif +K3D ?= $(shell [ "`command -v kubectl`" != '' ] && [ "`command -v k3d`" != '' ] && [[ "`kubectl config current-context`" =~ k3d-* ]] && echo true || echo false) + # Check that the needed executables are available, else exit before the build K := $(foreach exec,$(EXECUTABLES), $(if $(shell which $(exec)),some string,$(error "No $(exec) in PATH"))) @@ -63,16 +65,23 @@ dist/$(BINARY_NAME)-%: CGO_ENABLED=0 $(GOARGS) go build -v -ldflags '${LDFLAGS}' -o ${DIST_DIR}/$(BINARY_NAME)-$* ./cmd .PHONY: image -image: dist/$(BINARY_NAME)-linux-amd64 +BUILD_DIST = dist/$(BINARY_NAME)-linux-amd64 +ifeq ($(shell uname -m),arm64) +BUILD_DIST = dist/$(BINARY_NAME)-linux-arm64 +endif +image: clean $(BUILD_DIST) DOCKER_BUILDKIT=1 docker build -t $(IMAGE_NAMESPACE)/$(BINARY_NAME):$(VERSION) --target $(BINARY_NAME) -f $(DOCKERFILE) . 
@if [ "$(DOCKER_PUSH)" = "true" ]; then docker push $(IMAGE_NAMESPACE)/$(BINARY_NAME):$(VERSION); fi +ifeq ($(K3D),true) + k3d image import $(IMAGE_NAMESPACE)/$(BINARY_NAME):$(VERSION) +endif -image-linux-%: dist/$(BINARY_NAME)-linux-$* +image-linux-%: dist/$(BINARY_NAME)-linux-% DOCKER_BUILDKIT=1 docker build --build-arg "ARCH=$*" -t $(IMAGE_NAMESPACE)/$(BINARY_NAME):$(VERSION)-linux-$* --platform "linux/$*" --target $(BINARY_NAME) -f $(DOCKERFILE) . @if [ "$(DOCKER_PUSH)" = "true" ]; then docker push $(IMAGE_NAMESPACE)/$(BINARY_NAME):$(VERSION)-linux-$*; fi image-multi: set-qemu dist/$(BINARY_NAME)-linux-arm64.gz dist/$(BINARY_NAME)-linux-amd64.gz - docker buildx build --tag $(IMAGE_NAMESPACE)/$(BINARY_NAME):$(VERSION) --target $(BINARY_NAME) --platform linux/amd64,linux/arm64 --file ./Dockerfile ${PUSH_OPTION} . + docker buildx build --sbom=false --provenance=false --tag $(IMAGE_NAMESPACE)/$(BINARY_NAME):$(VERSION) --target $(BINARY_NAME) --platform linux/amd64,linux/arm64 --file ./Dockerfile ${PUSH_OPTION} . set-qemu: docker pull tonistiigi/binfmt:latest @@ -82,7 +91,19 @@ test: go test $(shell go list ./... | grep -v /vendor/ | grep -v /test/e2e/) -race -short -v test-functional: - go test -v -timeout 10m -count 1 --tags functional -p 1 ./test/e2e +ifeq ($(EventBusDriver),kafka) + kubectl -n argo-events apply -k test/manifests/kafka + kubectl -n argo-events wait -l statefulset.kubernetes.io/pod-name=kafka-0 --for=condition=ready pod --timeout=60s +endif + go test -v -timeout 20m -count 1 --tags functional -p 1 ./test/e2e +ifeq ($(EventBusDriver),kafka) + kubectl -n argo-events delete -k test/manifests/kafka +endif + +# to run just one of the functional e2e tests by name (i.e. 'make TestMetricsWithWebhook'): +Test%: + go test -v -timeout 10m -count 1 --tags functional -p 1 ./test/e2e -run='.*/$*' + coverage: go test -covermode=count -coverprofile=profile.cov $(shell go list ./... 
| grep -v /vendor/ | grep -v /test/e2e/) @@ -97,13 +118,17 @@ crds: .PHONY: manifests manifests: crds - kustomize build manifests/cluster-install > manifests/install.yaml - kustomize build manifests/namespace-install > manifests/namespace-install.yaml - kustomize build manifests/extensions/validating-webhook > manifests/install-validating-webhook.yaml + kubectl kustomize manifests/cluster-install > manifests/install.yaml + kubectl kustomize manifests/namespace-install > manifests/namespace-install.yaml + kubectl kustomize manifests/extensions/validating-webhook > manifests/install-validating-webhook.yaml .PHONY: swagger swagger: ./hack/update-swagger.sh ${VERSION} + $(MAKE) api/jsonschema/schema.json + +api/jsonschema/schema.json: api/openapi-spec/swagger.json hack/jsonschema/main.go + go run ./hack/jsonschema .PHONY: codegen codegen: @@ -126,16 +151,16 @@ docs/assets/diagram.png: go-diagrams/diagram.dot .PHONY: start start: image kubectl apply -f test/manifests/argo-events-ns.yaml - kustomize build test/manifests | sed 's@quay.io/argoproj/@$(IMAGE_NAMESPACE)/@' | sed 's/:$(BASE_VERSION)/:$(VERSION)/' | kubectl -n argo-events apply -l app.kubernetes.io/part-of=argo-events --prune --force -f - + kubectl kustomize test/manifests | sed 's@quay.io/codefresh/@$(IMAGE_NAMESPACE)/@' | sed 's/:$(BASE_VERSION)/:$(VERSION)/' | kubectl -n argo-events apply -l app.kubernetes.io/part-of=argo-events --prune=false --force -f - kubectl -n argo-events wait --for=condition=Ready --timeout 60s pod --all $(GOPATH)/bin/golangci-lint: - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b `go env GOPATH`/bin v1.26.0 + curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b `go env GOPATH`/bin v1.54.1 .PHONY: lint lint: $(GOPATH)/bin/golangci-lint go mod tidy - golangci-lint run --fix --verbose --concurrency 4 --timeout 5m + golangci-lint run --fix --verbose --concurrency 4 --timeout 5m --enable goimports # release - targets only available on release branch ifneq ($(findstring release,$(GIT_BRANCH)),) @@ -162,7 +187,7 @@ check-version-warning: .PHONY: update-manifests-version update-manifests-version: - cat manifests/base/kustomization.yaml | sed 's/newTag: .*/newTag: $(VERSION)/' | sed 's@value: quay.io/argoproj/argo-events:.*@value: quay.io/argoproj/argo-events:$(VERSION)@' > /tmp/base_kustomization.yaml + cat manifests/base/kustomization.yaml | sed 's/newTag: .*/newTag: $(VERSION)/' | sed 's@value: quay.io/codefresh/argo-events:.*@value: quay.io/codefresh/argo-events:$(VERSION)@' > /tmp/base_kustomization.yaml mv /tmp/base_kustomization.yaml manifests/base/kustomization.yaml cat manifests/extensions/validating-webhook/kustomization.yaml | sed 's/newTag: .*/newTag: $(VERSION)/' > /tmp/wh_kustomization.yaml mv /tmp/wh_kustomization.yaml manifests/extensions/validating-webhook/kustomization.yaml @@ -171,4 +196,4 @@ update-manifests-version: .PHONY: checksums checksums: - for f in ./dist/$(BINARY_NAME)-*.gz; do openssl dgst -sha256 "$$f" | awk ' { print $$2 }' > "$$f".sha256 ; done + sha256sum ./dist/$(BINARY_NAME)-*.gz | awk -F './dist/' '{print $$1 $$2}' > ./dist/$(BINARY_NAME)-checksums.txt diff --git a/OWNERS b/OWNERS index 9716a09c58..1e91f7c1e9 100644 --- a/OWNERS +++ b/OWNERS @@ -1,9 +1,9 @@ owners: -- VaibhavPage +- whynowy + +reviewers: +- daniel-codefresh approvers: -- dtaniwaki - edlee2121 - jessesuen -- VaibhavPage -- whynowy diff --git a/README.md b/README.md index 5c54aaac22..27d91bd977 100644 --- a/README.md 
+++ b/README.md @@ -4,8 +4,10 @@ [![slack](https://img.shields.io/badge/slack-argoproj-brightgreen.svg?logo=slack)](https://argoproj.github.io/community/join-slack) [![Build Status](https://travis-ci.org/argoproj/argo-events.svg?branch=master)](https://travis-ci.org/argoproj/argo-events) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/3832/badge)](https://bestpractices.coreinfrastructure.org/projects/3832) +[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/argoproj/argo-events/badge)](https://api.securityscorecards.dev/projects/github.com/argoproj/argo-events) [![GoDoc](https://godoc.org/github.com/argoproj/argo-events?status.svg)](https://godoc.org/github.com/argoproj/argo-events/pkg/apis) [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE) +[![Artifact HUB](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/argo-events)](https://artifacthub.io/packages/helm/argo/argo-events) ## What is Argo Events? @@ -47,6 +49,7 @@ The Argo Workflows has an API and user interface support Argo Events. ## Blogs and Presentations +* [Awesome-Argo: A Curated List of Awesome Projects and Resources Related to Argo](https://github.com/terrytangyuan/awesome-argo) * [Automation of Everything - How To Combine Argo Events, Workflows & Pipelines, CD, and Rollouts](https://youtu.be/XNXJtxkUKeY) * [Argo Events - Event-Based Dependency Manager for Kubernetes](https://youtu.be/sUPkGChvD54) * [Argo Events Deep-dive](https://youtu.be/U4tCYcCK20w) @@ -64,9 +67,13 @@ The Argo Workflows has an API and user interface support Argo Events. Participation in the Argo Events project is governed by the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). -[Contributions](https://github.com/codefresh-io/argo-events/issues) are more than welcome, if you are interested take a look -at our [Contributing Guidelines](./CONTRIBUTING.md). +[Contributions](https://github.com/argoproj/argo-events/issues) are more than welcome, if you are interested take a look +at our [Contributing Guidelines](./docs/CONTRIBUTING.md). -## License +## License Apache License Version 2.0, see [LICENSE](./LICENSE) + +## Security + +Please see [SECURITY.md](https://github.com/argoproj/argo-events/blob/master/SECURITY.md) diff --git a/SECURITY.md b/SECURITY.md index 5d73b48a7d..fabca623d6 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -1,16 +1,36 @@ -# Security +# Security policy for Argo Events -## Reporting Vulnerabilities +## Reporting a Vulnerability -Please report security vulnerabilities by e-mailing: +If you find a security related bug in Argo Events, we kindly ask you for responsible +disclosure and for giving us appropriate time to react, analyze and develop a +fix to mitigate the found security vulnerability. -* [Jesse_Suen@intuit.com](mailto:Jesse_Suen@intuit.com) -* [Alex_Collins@intuit.com](mailto:Alex_Collins@intuit.com) -* [Edward_Lee@intuit.com](mailto:Edward_Lee@intuit.com) +Please report vulnerabilities by e-mail to the following address: + +* cncf-argo-security@lists.cncf.io + +All vulnerabilities and associated information will be treated with full confidentiality. ## Public Disclosure -Security vulnerabilities will be disclosed via [release notes](docs/releasing.md). 
+Security vulnerabilities will be disclosed via [release notes](docs/releasing.md) and using the
+[GitHub Security Advisories](https://github.com/argoproj/argo-events/security/advisories)
+feature to keep our community well informed, and will credit you for your findings (unless you prefer to stay anonymous, of course).
+
+## Internet Bug Bounty collaboration
+
+We're happy to announce that the Argo project is collaborating with the great
+folks over at
+[Hacker One](https://hackerone.com/) and their
+[Internet Bug Bounty program](https://hackerone.com/ibb)
+to reward the awesome people who find security vulnerabilities in the four
+main Argo projects (CD, Events, Rollouts and Workflows) and then work with
+us to fix and disclose them in a responsible manner.
+
+If you report a vulnerability to us as outlined in this security policy, we
+will work together with you to find out whether your finding is eligible for
+claiming a bounty, and also on how to claim it.
 
 ## Vulnerability Scanning
diff --git a/USERS.md b/USERS.md
index 4394cfa80c..614751872e 100644
--- a/USERS.md
+++ b/USERS.md
@@ -4,19 +4,52 @@ Organizations below are **officially** using Argo Events. Please send a PR with
 1. [3Rein](https://www.3rein.com)
 1. [7shifts](https://www.7shifts.com)
+1. [Adobe](https://adobe.com/)
+1. [Akuity](https://akuity.io/)
+1. [Alibaba Group](https://www.alibabagroup.com/)
+1. [Ancestry](https://www.ancestry.com/)
+1. [Anova](https://www.anova.com/)
+1. [Ant Group](https://www.antgroup.com/)
+1. [ArthurAI](https://arthur.ai/)
 1. [BioBox Analytics](https://biobox.io)
 1. [BlackRock](https://www.blackrock.com/)
+1. [Bloomberg](https://www.bloomberg.com/)
 1. [Canva](https://www.canva.com/)
+1. [Carrefour](https://www.carrefour.com/)
+1. [Codefresh](https://codefresh.io/)
+1. [Dazz](https://dazz.io/)
 1. [DevSamurai](https://www.devsamurai.com/)
+1. [Elastic](https://elastic.co/)
+1. [Enso Security](https://enso.security)
 1. [Fairwinds](https://fairwinds.com/)
+1. [Gepardec](https://gepardec.com/)
+1. [GHGSat](https://www.ghgsat.com/)
+1. [Gllue](https://gllue.com/)
 1. [Greenhouse Software](https://www.greenhouse.io/)
+1. [Helio](https://helio.exchange)
+1. [iFood](https://www.ifood.com.br)
 1. [InsideBoard](https://www.insideboard.com)
 1. [Intuit](https://www.intuit.com/)
+1. [FikaWorks](https://fika.works/)
+1. [Loam](https://www.getloam.com/)
+1. [MariaDB](https://mariadb.com/)
+1. [Mobimeo GmbH](https://mobimeo.com/en/home/)
 1. [OneCause](https://www.onecause.com/)
+1. [Pinnacle Reliability](https://pinnaclereliability.com/)
+1. [Phrase](https://www.phrase.com/)
 1. [Produvar](https://www.produvar.com/)
 1. [ProPoint Solutions](https://supersalon.com)
 1. [PwC Labs](https://www.pwc.com/us/en/careers/why-pwc/what-we-do/what-we-do-pwc-labs.html)
 1. [Rakuten](https://www.rakuten.com)
+1. [Rookout](https://www.rookout.com/)
 1. [RTL Nederland](https://www.rtl.nl)
+1. [Salesforce](https://salesforce.com)
+1. [SAP Concur](https://www.concur.com/)
+1. [Softonic](https://www.softonic.com)
+1. [Swissblock Technologies](https://swissblock.net)
+1. [Ubie](https://ubie.life/)
 1. [Viaduct](https://www.viaduct.ai/)
+1. [Yubo](https://www.yubo.live/)
+1. [WooliesX](https://wooliesx.com.au/)
 1. [Woolworths Group](https://www.woolworthsgroup.com.au/)
+1. [Zillow Group](https://www.zillow.com)
\ No newline at end of file
diff --git a/api/event-bus.html b/api/event-bus.html
index c606108c27..66314c4a09 100644
--- a/api/event-bus.html
+++ b/api/event-bus.html
@@ -47,18 +47,614 @@

BusConfig +(Optional) + + + + +jetstream
+ + +JetStreamConfig + + + + +(Optional) + + + + +kafka
+ + +KafkaBus + + + + +(Optional) + + + + +
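The new jetstream and kafka entries in BusConfig mirror the corresponding EventBus spec sections documented below. As a minimal sketch (not part of the generated reference), an exotic Kafka EventBus that would produce such a config might look like this; the broker addresses, topic, and group name are placeholders:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: EventBus
metadata:
  name: default
spec:
  kafka:
    # Comma-separated broker list; these hostnames are illustrative.
    url: kafka-0.broker.svc:9092,kafka-1.broker.svc:9092
    # Optional; defaults to {namespace_name}-{eventbus_name}.
    topic: example-eventbus-topic
    consumerGroup:
      # Optional; defaults to {namespace_name}-{sensor_name}.
      groupName: example-group
      # One of: sticky, roundrobin, range (range is the default).
      rebalanceStrategy: range
      # For a brand-new group, start from the oldest event (true) or the newest (false).
      startOldest: false
```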

ContainerTemplate +

+

+(Appears on: +JetStreamBus, +NativeStrategy) +

+

+

ContainerTemplate defines a customized spec for a container

+

+ + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+resources
+ + +Kubernetes core/v1.ResourceRequirements + + +
+
+imagePullPolicy
+ + +Kubernetes core/v1.PullPolicy + + +
+
+securityContext
+ + +Kubernetes core/v1.SecurityContext + + +
+
+

EventBus +

+

+

EventBus is the definition of an eventbus resource

+

+ + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+metadata
+ + +Kubernetes meta/v1.ObjectMeta + + +
+Refer to the Kubernetes API documentation for the fields of the +metadata field. +
+spec
+ + +EventBusSpec + + +
+
+
+ + + + + + + + + + + + + + + + + +
+nats
+ + +NATSBus + + +
+(Optional) +

NATS eventbus

+
+jetstream
+ + +JetStreamBus + + +
+(Optional) +
+kafka
+ + +KafkaBus + + +
+(Optional) +

Kafka eventbus

+
+jetstreamExotic
+ + +JetStreamConfig + + +
+(Optional) +

Exotic JetStream

+
+
+status
+ + +EventBusStatus + + +
+(Optional) +
+
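As a concrete anchor for the metadata/spec/status fields above, the canonical EventBus named “default” is typically declared with a native NATS spec. A minimal sketch (replica count and auth strategy are illustrative):

```yaml
apiVersion: argoproj.io/v1alpha1
kind: EventBus
metadata:
  name: default
spec:
  nats:
    native:
      # Three replicas is the usual minimum for an HA NATS streaming deployment.
      replicas: 3
      # Auth strategy; "token" or "none".
      auth: token
```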

EventBusSpec +

+

+(Appears on: +EventBus) +

+

+

EventBusSpec refers to the specification of an eventbus resource

+

+ + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+nats
+ + +NATSBus + + +
+(Optional) +

NATS eventbus

+
+jetstream
+ + +JetStreamBus + + +
+(Optional) +
+kafka
+ + +KafkaBus + + +
+(Optional) +

Kafka eventbus

+
+jetstreamExotic
+ + +JetStreamConfig + + +
+(Optional) +

Exotic JetStream

+
+

EventBusStatus +

+

+(Appears on: +EventBus) +

+

+

EventBusStatus holds the status of the eventbus resource

+

+ + + + + + + + + + + + + + + + + +
FieldDescription
+Status
+ +github.com/argoproj/argo-events/pkg/apis/common.Status + +
+

+(Members of Status are embedded into this type.) +

+
+config
+ + +BusConfig + + +
+

Config holds the finalized configuration of EventBus

+
+

JetStreamBus +

+

+(Appears on: +EventBusSpec) +

+

+

JetStreamBus holds the JetStream EventBus information

+

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+version
+ +string + +
+

JetStream version, such as “2.7.3”

+
+replicas
+ +int32 + +
+

JetStream StatefulSet size

+
+containerTemplate
+ + +ContainerTemplate + + +
+(Optional) +

ContainerTemplate contains customized spec for Nats JetStream container

+
+reloaderContainerTemplate
+ + +ContainerTemplate + + +
+(Optional) +

ReloaderContainerTemplate contains customized spec for config reloader container

+
+metricsContainerTemplate
+ + +ContainerTemplate + + +
+(Optional) +

MetricsContainerTemplate contains customized spec for metrics container

+
+persistence
+ + +PersistenceStrategy + + +
+(Optional) +
+metadata
+ +github.com/argoproj/argo-events/pkg/apis/common.Metadata + +
+

Metadata sets the pods’ metadata, i.e. annotations and labels

+
+nodeSelector
+ +map[string]string + +
+(Optional) +

NodeSelector is a selector which must be true for the pod to fit on a node. +Selector which must match a node’s labels for the pod to be scheduled on that node. +More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/

+
+tolerations
+ + +[]Kubernetes core/v1.Toleration + + +
+(Optional) +

If specified, the pod’s tolerations.

+
+securityContext
+ + +Kubernetes core/v1.PodSecurityContext + + +
+(Optional) +

SecurityContext holds pod-level security attributes and common container settings. +Optional: Defaults to empty. See type description for default values of each field.

+
+imagePullSecrets
+ + +[]Kubernetes core/v1.LocalObjectReference + + +
+(Optional) +

ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. +If specified, these secrets will be passed to individual puller implementations for them to use. For example, +in the case of docker, only DockerConfig type secrets are honored. +More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod

+
+priorityClassName
+ +string + +
+(Optional) +

If specified, indicates the pod’s priority. “system-node-critical” +and “system-cluster-critical” are two special keywords which indicate the +highest priorities with the former being the highest priority. Any other +name must be defined by creating a PriorityClass object with that name. +If not specified, the pod priority will be default or zero if there is no +default. +More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/

+
+priority
+ +int32 + +
+(Optional) +

The priority value. Various system components use this field to find the +priority of the pod. When Priority Admission Controller is enabled, +it prevents users from setting this field. The admission controller populates +this field from PriorityClassName. +The higher the value, the higher the priority. +More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/

+
+affinity
+ + +Kubernetes core/v1.Affinity + + +
+(Optional) +

The pod’s scheduling constraints +More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/

+
+serviceAccountName
+ +string + +
+(Optional) +

ServiceAccountName to apply to the StatefulSet

+
+settings
+ +string + +
+(Optional) +

JetStream configuration, if not specified, global settings in controller-config will be used. +See https://docs.nats.io/running-a-nats-service/configuration#jetstream. +Only configure “max_memory_store” or “max_file_store”, do not set “store_dir” as it has been hardcoded.

+
+startArgs
+ +[]string + +
+(Optional) +

Optional arguments to start nats-server. For example, “-D” to enable debugging output, “-DV” to enable debugging and tracing. +Check https://docs.nats.io/ for all the available arguments.

+
+streamConfig
+ +string + +
+(Optional) +

Optional configuration for the streams to be created in this JetStream service, if specified, it will be merged with the default configuration in controller-config. +It accepts a YAML format configuration, available fields include, “maxBytes”, “maxMsgs”, “maxAge” (e.g. 72h), “replicas” (1, 3, 5), “duplicates” (e.g. 5m).

+
+maxPayload
+ +string + +
+(Optional) +

Maximum number of bytes in a message payload, 0 means unlimited. Defaults to 1MB

-
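Putting the JetStreamBus fields together, a hedged sketch of a JetStream EventBus follows; the version string, sizes, and stream settings are illustrative and must match what the controller's configuration supports:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: EventBus
metadata:
  name: default
spec:
  jetstream:
    # Must be one of the versions listed in controller-config.
    version: "2.7.3"
    # JetStream StatefulSet size.
    replicas: 3
    persistence:
      storageClassName: standard    # placeholder StorageClass name
      accessMode: ReadWriteOnce
      volumeSize: 10Gi
    # Merged with the default stream configuration in controller-config.
    streamConfig: |
      maxAge: 72h
      replicas: 3
    # Only max_memory_store / max_file_store should be set here;
    # store_dir is hardcoded.
    settings: |
      max_file_store: 1GB
```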

ContainerTemplate +

JetStreamConfig

(Appears on: -NativeStrategy) +BusConfig, +EventBusSpec)

-

ContainerTemplate defines customized spec for a container

@@ -70,22 +666,51 @@

ContainerTemplate

+ + + + + + + +
-resources
+url
- -Kubernetes core/v1.ResourceRequirements +string + +
+

JetStream (Nats) URL

+
+accessSecret
+ + +Kubernetes core/v1.SecretKeySelector
+(Optional) +

Secret for auth

+
+streamConfig
+ +string + +
+(Optional)
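JetStreamConfig also backs the jetstreamExotic option for pointing the bus at an externally managed JetStream service. A minimal sketch, with a placeholder URL and a hypothetical secret name:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: EventBus
metadata:
  name: default
spec:
  jetstreamExotic:
    url: nats://nats.example.com:4222   # placeholder address
    accessSecret:
      name: jetstream-auth              # hypothetical secret for auth
      key: token
```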
-

EventBus +

KafkaBus

-

EventBus is the definition of a eventbus resource

+(Appears on: +BusConfig, +EventBusSpec) +

+

+

KafkaBus holds the Kafka EventBus information

@@ -97,102 +722,86 @@

EventBus

- -
-metadata
+url
- -Kubernetes meta/v1.ObjectMeta - +string
-Refer to the Kubernetes API documentation for the fields of the -metadata field. +

URL to kafka cluster, multiple URLs separated by comma

-spec
+topic
- -EventBusSpec - +string
-
-
- +(Optional) +

Topic name, defaults to {namespace_name}-{eventbus_name}

+ + - -
-nats
+version
- -NATSBus - +string
-

NATS eventbus

-
+(Optional) +

Kafka version, sarama defaults to the oldest supported stable version

-status
+tls
- -EventBusStatus - +github.com/argoproj/argo-events/pkg/apis/common.TLSConfig
(Optional) +

TLS configuration for the kafka client.

-

EventBusSpec -

-

-(Appears on: -EventBus) -

-

-

EventBusSpec refers to specification of eventbus resource

-

- - - - + + - -
FieldDescription +sasl
+ +github.com/argoproj/argo-events/pkg/apis/common.SASLConfig + +
+(Optional) +

SASL configuration for the kafka client

+
-nats
+consumerGroup
- -NATSBus + +KafkaConsumerGroup
-

NATS eventbus

+(Optional) +

Consumer group for kafka client

-

EventBusStatus +

KafkaConsumerGroup

(Appears on: -EventBus) +KafkaBus)

-

EventBusStatus holds the status of the eventbus resource

@@ -204,28 +813,38 @@

EventBusStatus

+ + + + @@ -333,7 +952,7 @@

NATSConfig

- - - - + + + + + + + + + + + + + + + + + + + + + + + +
-Status
+groupName
-github.com/argoproj/argo-events/pkg/apis/common.Status +string
-

-(Members of Status are embedded into this type.) -

+(Optional) +

Consumer group name, defaults to {namespace_name}-{sensor_name}

-config
+rebalanceStrategy
- -BusConfig - +string
-

Config holds the fininalized configuration of EventBus

+(Optional) +

Rebalance strategy can be one of: sticky, roundrobin, range. Range is the default.

+
+startOldest
+ +bool + +
+(Optional) +

When starting up a new group do we want to start from the oldest event (true) or the newest event (false), defaults to false

accessSecret
- + Kubernetes core/v1.SecretKeySelector @@ -387,17 +1006,6 @@

NativeStrategy

-antiAffinity
- -bool - -
-

Deprecated, use Affinity instead, will be removed in v1.5

-
persistence
@@ -455,7 +1063,7 @@

NativeStrategy

tolerations
- + []Kubernetes core/v1.Toleration @@ -480,7 +1088,7 @@

NativeStrategy

securityContext
- + Kubernetes core/v1.PodSecurityContext @@ -507,7 +1115,7 @@

NativeStrategy

imagePullSecrets
- + []Kubernetes core/v1.LocalObjectReference @@ -571,7 +1179,7 @@

NativeStrategy

affinity
- + Kubernetes core/v1.Affinity @@ -604,12 +1212,79 @@

NativeStrategy

Total size of messages per channel, 0 means unlimited. Defaults to 1GB

+maxSubs
+ +uint64 + +
+

Maximum number of subscriptions per channel, 0 means unlimited. Defaults to 1000

+
+maxPayload
+ +string + +
+

Maximum number of bytes in a message payload, 0 means unlimited. Defaults to 1MB

+
+raftHeartbeatTimeout
+ +string + +
+

Specifies the time in follower state without a leader before attempting an election, e.g. “72h”, “4h35m”. Defaults to 2s

+
+raftElectionTimeout
+ +string + +
+

Specifies the time in candidate state without a leader before attempting an election, e.g. “72h”, “4h35m”. Defaults to 2s

+
+raftLeaseTimeout
+ +string + +
+

Specifies how long a leader waits without being able to contact a quorum of nodes before stepping down as leader, e.g. “72h”, “4h35m”. Defaults to 1s

+
+raftCommitTimeout
+ +string + +
+

Specifies the time without an Apply() operation before sending a heartbeat to ensure timely commit, e.g. “72h”, “4h35m”. Defaults to 100ms

+
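The maxSubs, maxPayload, and raft* knobs above belong to the native NATS strategy. A hedged sketch showing where they sit in a manifest (the values shown are simply the documented defaults):

```yaml
apiVersion: argoproj.io/v1alpha1
kind: EventBus
metadata:
  name: default
spec:
  nats:
    native:
      replicas: 3
      maxSubs: 1000              # 0 means unlimited
      maxPayload: "1MB"          # 0 means unlimited
      raftHeartbeatTimeout: "2s"
      raftElectionTimeout: "2s"
      raftLeaseTimeout: "1s"
      raftCommitTimeout: "100ms"
```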

PersistenceStrategy

(Appears on: +JetStreamBus, NativeStrategy)

@@ -640,7 +1315,7 @@

PersistenceStrategy accessMode
-
+
 Kubernetes core/v1.PersistentVolumeAccessMode
diff --git a/api/event-bus.md b/api/event-bus.md
index 2d79d6355f..1af65d8e46 100644
--- a/api/event-bus.md
+++ b/api/event-bus.md
@@ -61,52 +61,594 @@ Description
 NATSConfig
+(Optional)
+
+
+
+
+jetstream
+ JetStreamConfig + + + +(Optional) + + + + +kafka
+KafkaBus + + +(Optional) + + + + +

+ContainerTemplate +

+

+(Appears on: +JetStreamBus, +NativeStrategy) +

+

+

+ContainerTemplate defines a customized spec for a container +

+

+ + + + + + + + + + + + + + + + + + + + + +
+Field + +Description +
+resources
+ +Kubernetes core/v1.ResourceRequirements +
+
+imagePullPolicy
+ +Kubernetes core/v1.PullPolicy +
+
+securityContext
+ +Kubernetes core/v1.SecurityContext +
+
+

+EventBus +

+

+

+EventBus is the definition of an eventbus resource +

+

+ + + + + + + + + + + + + + + + + + + + + +
+Field + +Description +
+metadata
+ +Kubernetes meta/v1.ObjectMeta +
+Refer to the Kubernetes API documentation for the fields of the +metadata field. +
+spec
+ EventBusSpec +
+

+ + + + + + + + + + + + + + + + + +
+nats
+NATSBus +
+(Optional) +

+NATS eventbus +

+
+jetstream
+ JetStreamBus +
+(Optional) +
+kafka
+KafkaBus +
+(Optional) +

+Kafka eventbus +

+
+jetstreamExotic
+ JetStreamConfig + +
+(Optional) +

+Exotic JetStream +

+
+
+status
+ EventBusStatus + +
+(Optional) +
+

+EventBusSpec +

+

+(Appears on: +EventBus) +

+

+

+EventBusSpec refers to the specification of an eventbus resource +

+

+ + + + + + + + + + + + + + + + + + + + + + + + + +
+Field + +Description +
+nats
+NATSBus +
+(Optional) +

+NATS eventbus +

+
+jetstream
+ JetStreamBus +
+(Optional) +
+kafka
+KafkaBus +
+(Optional) +

+Kafka eventbus +

+
+jetstreamExotic
+ JetStreamConfig + +
+(Optional) +

+Exotic JetStream +

+
+

+EventBusStatus +

+

+(Appears on: +EventBus) +

+

+

+EventBusStatus holds the status of the eventbus resource +

+

+ + + + + + + + + + + + + + + + + +
+Field + +Description +
+Status
+github.com/argoproj/argo-events/pkg/apis/common.Status +
+

+(Members of Status are embedded into this type.) +

+
+config
+BusConfig +
+

+Config holds the finalized configuration of EventBus +

+
+

+JetStreamBus +

+

+(Appears on: +EventBusSpec) +

+

+

+JetStreamBus holds the JetStream EventBus information +

+

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + - -
+Field + +Description +
+version
string +
+

+JetStream version, such as “2.7.3” +

+
+replicas
int32 +
+

+JetStream StatefulSet size +

+
+containerTemplate
+ ContainerTemplate + +
+(Optional) +

+ContainerTemplate contains customized spec for Nats JetStream container +

+
+reloaderContainerTemplate
+ ContainerTemplate + +
+(Optional) +

+ReloaderContainerTemplate contains customized spec for config reloader +container +

+
+metricsContainerTemplate
+ ContainerTemplate + +
+(Optional) +

+MetricsContainerTemplate contains customized spec for metrics container +

+
+persistence
+ PersistenceStrategy + +
+(Optional) +
+metadata
+github.com/argoproj/argo-events/pkg/apis/common.Metadata +
+

+Metadata sets the pods’ metadata, i.e. annotations and labels +

+
+nodeSelector
map\[string\]string +
+(Optional) +

+NodeSelector is a selector which must be true for the pod to fit on a +node. Selector which must match a node’s labels for the pod to be +scheduled on that node. More info: +https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ +

+
+tolerations
+ +\[\]Kubernetes core/v1.Toleration +
+(Optional) +

+If specified, the pod’s tolerations. +

+
+securityContext
+ +Kubernetes core/v1.PodSecurityContext +
+(Optional) +

+SecurityContext holds pod-level security attributes and common container +settings. Optional: Defaults to empty. See type description for default +values of each field. +

+
+imagePullSecrets
+ +\[\]Kubernetes core/v1.LocalObjectReference +
+(Optional) +

+ImagePullSecrets is an optional list of references to secrets in the +same namespace to use for pulling any of the images used by this +PodSpec. If specified, these secrets will be passed to individual puller +implementations for them to use. For example, in the case of docker, +only DockerConfig type secrets are honored. More info: +https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod +

+
+priorityClassName
string +
+(Optional) +

+If specified, indicates the pod’s priority. “system-node-critical” +and “system-cluster-critical” are two special keywords which indicate +the highest priorities with the former being the highest priority. Any +other name must be defined by creating a PriorityClass object with that +name. If not specified, the pod priority will be default or zero if +there is no default. More info: +https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/

-

-ContainerTemplate -

+ + +priority
int32 + + +(Optional)

-(Appears on: -NativeStrategy) +The priority value. Various system components use this field to find the +priority of the pod. When Priority Admission Controller is +enabled, it prevents users from setting this field. The admission +controller populates this field from PriorityClassName. The higher the +value, the higher the priority. More info: +https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/

+ + + + +affinity
+ +Kubernetes core/v1.Affinity + + +(Optional)

+The pod’s scheduling constraints More info: +https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ +

+ + + + +serviceAccountName
string + + +(Optional)

-ContainerTemplate defines customized spec for a container +ServiceAccountName to apply to the StatefulSet

+ + + + +settings
string + + +(Optional) +

+JetStream configuration, if not specified, global settings in +controller-config will be used. See +https://docs.nats.io/running-a-nats-service/configuration#jetstream. +Only configure “max_memory_store” or “max_file_store”, do not set +“store_dir” as it has been hardcoded.

- - + + - - + + - - + + + +
-Field - -Description - +startArgs
\[\]string +
+(Optional) +

+Optional arguments to start nats-server. For example, “-D” to enable +debugging output, “-DV” to enable debugging and tracing. Check +https://docs.nats.io/ for all the +available arguments. +

+
-resources
- -Kubernetes core/v1.ResourceRequirements +streamConfig
string +
+(Optional) +

+Optional configuration for the streams to be created in this JetStream +service, if specified, it will be merged with the default configuration +in controller-config. It accepts a YAML format configuration, available +fields include, “maxBytes”, “maxMsgs”, “maxAge” (e.g. 72h), “replicas” +(1, 3, 5), “duplicates” (e.g. 5m). +

+
+maxPayload
string
+(Optional) +

+Maximum number of bytes in a message payload, 0 means unlimited. +Defaults to 1MB +

-

-EventBus +

+JetStreamConfig

-

-EventBus is the definition of a eventbus resource +(Appears on: +BusConfig, +EventBusSpec)

+

@@ -122,42 +664,30 @@ Description - -
-metadata
- -Kubernetes meta/v1.ObjectMeta +url
string
-Refer to the Kubernetes API documentation for the fields of the -metadata field. +

+JetStream (Nats) URL +

-spec
- EventBusSpec -
-

- - - -
-nats
-NATSBus +accessSecret
+ +Kubernetes core/v1.SecretKeySelector
+(Optional)

-NATS eventbus +Secret for auth

-
-status
- EventBusStatus - +streamConfig
string
(Optional) @@ -165,16 +695,17 @@ NATS eventbus
-

-EventBusSpec +

+KafkaBus

(Appears on: -EventBus) +BusConfig, +EventBusSpec)

-EventBusSpec refers to specification of eventbus resource +KafkaBus holds the Kafka EventBus information

@@ -191,28 +722,83 @@ Description + + + + + + + + + + + + + + + + + + + +
-nats
-NATSBus +url
string

-NATS eventbus +URL to kafka cluster, multiple URLs separated by comma +

+
+topic
string +
+(Optional) +

+Topic name, defaults to {namespace_name}-{eventbus_name} +

+
+version
string +
+(Optional) +

+Kafka version, sarama defaults to the oldest supported stable version +

+
+tls
+github.com/argoproj/argo-events/pkg/apis/common.TLSConfig +
+(Optional) +

+TLS configuration for the kafka client. +

+
+sasl
+github.com/argoproj/argo-events/pkg/apis/common.SASLConfig +
+(Optional) +

+SASL configuration for the kafka client +

+
+consumerGroup
+ KafkaConsumerGroup + +
+(Optional) +

+Consumer group for kafka client

-

-EventBusStatus +

+KafkaConsumerGroup

(Appears on: -EventBus) +KafkaBus)

-

-EventBusStatus holds the status of the eventbus resource -

@@ -228,23 +814,36 @@ Description + + + + @@ -359,7 +958,7 @@ Auth strategy, default to AuthStrategyNone - - - - @@ -588,7 +1177,7 @@ value, the higher the priority. More info: + + + + + + + + + + + + + + + + + + + + + + + +
-Status
-github.com/argoproj/argo-events/pkg/apis/common.Status +groupName
string
+(Optional)

-(Members of Status are embedded into this type.) +Consumer group name, defaults to {namespace_name}-{sensor_name}

-config
-BusConfig +rebalanceStrategy
string
+(Optional)

-Config holds the fininalized configuration of EventBus +Rebalance strategy can be one of: sticky, roundrobin, range. Range is +the default. +

+
+startOldest
bool +
+(Optional) +

+When starting up a new group do we want to start from the oldest event +(true) or the newest event (false), defaults to false

accessSecret
- + Kubernetes core/v1.SecretKeySelector
@@ -415,16 +1014,6 @@ Size is the NATS StatefulSet size
-antiAffinity
bool -
-

-Deprecated, use Affinity instead, will be removed in v1.5 -

-
persistence
PersistenceStrategy @@ -476,7 +1065,7 @@ scheduled on that node. More info:
tolerations
- + \[\]Kubernetes core/v1.Toleration
@@ -500,7 +1089,7 @@ Metadata sets the pods’s metadata, i.e. annotations and labels
securityContext
- + Kubernetes core/v1.PodSecurityContext
@@ -526,7 +1115,7 @@ Max Age of existing messages, i.e. “72h”, “4h35m”
imagePullSecrets
- + \[\]Kubernetes core/v1.LocalObjectReference
@@ -537,7 +1126,7 @@ same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: -https://kubernetes.io/docs/concepts/containers/images\#specifying-imagepullsecrets-on-a-pod +https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod

affinity
- + Kubernetes core/v1.Affinity
@@ -620,6 +1209,74 @@ Total size of messages per channel, 0 means unlimited. Defaults to 1GB

+maxSubs
uint64 +
+

+Maximum number of subscriptions per channel, 0 means unlimited. Defaults +to 1000 +

+
+maxPayload
string +
+

+Maximum number of bytes in a message payload, 0 means unlimited. +Defaults to 1MB +

+
+raftHeartbeatTimeout
string +
+

Specifies the time in follower state without a leader before attempting +an election, e.g. “72h”, “4h35m”. Defaults to 2s +

+
+raftElectionTimeout
string +
+

Specifies the time in candidate state without a leader before attempting +an election, e.g. “72h”, “4h35m”. Defaults to 2s +

+
+raftLeaseTimeout
string +
+

Specifies how long a leader waits without being able to contact a quorum +of nodes before stepping down as leader, e.g. “72h”, “4h35m”. Defaults +to 1s +

+
+raftCommitTimeout
string +
+

Specifies the time without an Apply() operation before sending a +heartbeat to ensure timely commit, e.g. “72h”, “4h35m”. Defaults to +100ms +

+

@@ -627,6 +1284,7 @@ PersistenceStrategy

(Appears on: +JetStreamBus, NativeStrategy)

@@ -654,21 +1312,21 @@ Description (Optional)

Name of the StorageClass required by the claim. More info: -https://kubernetes.io/docs/concepts/storage/persistent-volumes\#class-1 +https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1

accessMode
- + Kubernetes core/v1.PersistentVolumeAccessMode (Optional)

Available access modes such as ReadWriteOnce, ReadWriteMany -https://kubernetes.io/docs/concepts/storage/persistent-volumes/\#access-modes +https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes

diff --git a/api/event-source.html b/api/event-source.html
index 8a4d4503c2..a94d42c38c 100644
--- a/api/event-source.html
+++ b/api/event-source.html
@@ -213,7 +213,7 @@

AMQPEventSource (Optional)

ExchangeDeclare holds the configuration for the exchange on the server -For more information, visit https://godoc.org/github.com/streadway/amqp#Channel.ExchangeDeclare

+For more information, visit https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.ExchangeDeclare

@@ -230,7 +230,7 @@

AMQPEventSource

QueueDeclare holds the configuration of a queue to hold messages and deliver to consumers. Declaring creates a queue if it doesn’t already exist, or ensures that an existing queue matches the same parameters -For more information, visit https://godoc.org/github.com/streadway/amqp#Channel.QueueDeclare

+For more information, visit https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.QueueDeclare

@@ -246,7 +246,7 @@

AMQPEventSource (Optional)

QueueBind holds the configuration that binds an exchange to a queue so that publishings to the exchange will be routed to the queue when the publishing routing key matches the binding routing key -For more information, visit https://godoc.org/github.com/streadway/amqp#Channel.QueueBind

+For more information, visit https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.QueueBind

@@ -261,7 +261,7 @@

AMQPEventSource (Optional)

Consume holds the configuration to immediately starts delivering queued messages -For more information, visit https://godoc.org/github.com/streadway/amqp#Channel.Consume

+For more information, visit https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.Consume

@@ -276,6 +276,33 @@

AMQPEventSource

Auth hosts secret selectors for username and password

+ + +urlSecret
+ + +Kubernetes core/v1.SecretKeySelector + + + + +

URLSecret is the secret reference for the rabbitmq service URL

+ + + + +filter
+ + +EventSourceFilter + + + + +(Optional) +

Filter

+ +
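The new urlSecret and filter fields slot into an AMQP event source like the hedged sketch below; the secret name, exchange settings, and filter expression are illustrative:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: EventSource
metadata:
  name: amqp
spec:
  amqp:
    example:
      # urlSecret replaces a plain-text url for the RabbitMQ service.
      urlSecret:
        name: rabbitmq-creds        # hypothetical secret
        key: url
      exchangeName: test
      exchangeType: fanout
      routingKey: hello
      jsonBody: true
      # Drop events whose JSON body does not satisfy the expression.
      filter:
        expression: "body.message == 'hello'"
```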

AMQPExchangeDeclareConfig @@ -457,6 +484,18 @@

AMQPQueueDeclareConfig

NoWait, when true, assumes the queue is already declared on the server

+ + +arguments
+ +string + + + +(Optional) +

Arguments of a queue (also known as “x-arguments”) used for optional features and plugins

+ +

AzureEventsHubEventSource @@ -493,7 +532,7 @@

AzureEventsHubEventSourc sharedAccessKeyName
- + Kubernetes core/v1.SecretKeySelector @@ -506,7 +545,7 @@

AzureEventsHubEventSourc sharedAccessKey
- + Kubernetes core/v1.SecretKeySelector @@ -538,17 +577,31 @@

AzureEventsHubEventSourc

Metadata holds the user defined metadata which will be passed along the event payload.

+ + +filter
+ + +EventSourceFilter + + + + +(Optional) +

Filter

+ + -

CalendarEventSource +

AzureQueueStorageEventSource

(Appears on: EventSourceSpec)

-

CalendarEventSource describes a time based dependency. One of the fields (schedule, interval, or recurrence) must be passed. -Schedule takes precedence over interval; interval takes precedence over recurrence

+

AzureQueueStorageEventSource describes the event source for azure queue storage +more info at https://learn.microsoft.com/en-us/azure/storage/queues/

@@ -560,59 +613,68 @@

CalendarEventSource

@@ -629,66 +691,55 @@

CalendarEventSource

- -
-schedule
+storageAccountName
string
-

Schedule is a cron-like expression. For reference, see: https://en.wikipedia.org/wiki/Cron

+(Optional) +

StorageAccountName is the name of the storage account where the queue is. This field is necessary to +access via Azure AD (managed identity) and it is ignored if ConnectionString is set.

-interval
+connectionString
-string + +Kubernetes core/v1.SecretKeySelector +
-

Interval is a string that describes an interval duration, e.g. 1s, 30m, 2h…

+(Optional) +

ConnectionString is the connection string to access Azure Queue Storage. If this fields is not provided +it will try to access via Azure AD with StorageAccountName.

-exclusionDates
+queueName
-[]string +string
+

QueueName is the name of the queue

-timezone
+jsonBody
-string +bool
(Optional) -

Timezone in which to run the schedule

+

JSONBody specifies that all event body payload coming from this +source will be JSON

-userPayload
+dlq
-encoding/json.RawMessage +bool
(Optional) -

UserPayload will be sent to sensor as extra data once the event is triggered -Deprecated: will be removed in v1.5. Please use Metadata instead.

+

DLQ specifies if a dead-letter queue is configured for messages that can’t be processed successfully. +If set to true, messages with invalid payload won’t be acknowledged to allow to forward them farther to the dead-letter queue. +The default value is false.

-persistence
+filter
- -EventPersistence + +EventSourceFilter
-

Persistence hold the configuration for event persistence

+(Optional) +

Filter

-

CatchupConfiguration -

-

-(Appears on: -EventPersistence) -

-

-

- - - - - - - -
FieldDescription
-enabled
+decodeMessage
bool
-

Enabled enables to triggered the missed schedule when eventsource restarts

+(Optional) +

DecodeMessage specifies if all the messages should be base64 decoded. +If set to true the decoding is done before the evaluation of JSONBody

-maxDuration
+waitTimeInSeconds
-string +int32
-

MaxDuration holds max catchup duration

+(Optional) +

WaitTimeInSeconds is the duration (in seconds) for which the event source waits between empty results from the queue. +The default value is 3 seconds.

-
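Collecting the AzureQueueStorageEventSource fields, a minimal sketch follows; the secret reference and queue name are placeholders, and connectionString and storageAccountName are alternative access paths:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: EventSource
metadata:
  name: azure-queue-storage
spec:
  azureQueueStorage:
    example:
      queueName: test
      # Either a connection-string secret...
      connectionString:
        name: azure-queue-creds     # hypothetical secret
        key: connection-string
      # ...or storageAccountName with Azure AD (managed identity):
      # storageAccountName: mystorageaccount
      jsonBody: true
      dlq: false
      waitTimeInSeconds: 3          # default back-off on empty results
```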

ConfigMapPersistence +

AzureServiceBusEventSource

(Appears on: -EventPersistence) +EventSourceSpec)

+

AzureServiceBusEventSource describes the event source for azure service bus +More info at https://docs.microsoft.com/en-us/azure/service-bus-messaging/

@@ -700,165 +751,173 @@

ConfigMapPersistence

- -
-name
+connectionString
-string + +Kubernetes core/v1.SecretKeySelector +
-

Name of the configmap

+(Optional) +

ConnectionString is the connection string for the Azure Service Bus. If this fields is not provided +it will try to access via Azure AD with DefaultAzureCredential and FullyQualifiedNamespace.

-createIfNotExist
+queueName
-bool +string
-

CreateIfNotExist will create configmap if it doesn’t exists

+

QueueName is the name of the Azure Service Bus Queue

-

EmitterEventSource -

-

-(Appears on: -EventSourceSpec) -

-

-

EmitterEventSource describes the event source for emitter -More info at https://emitter.io/develop/getting-started/

-

- - - - - - - - + +
FieldDescription
-broker
+topicName
string
-

Broker URI to connect to.

+

TopicName is the name of the Azure Service Bus Topic

-channelKey
+subscriptionName
string
-

ChannelKey refers to the channel key

+

SubscriptionName is the name of the Azure Service Bus Topic Subscription

-channelName
+tls
-string +github.com/argoproj/argo-events/pkg/apis/common.TLSConfig
-

ChannelName refers to the channel name

+(Optional) +

TLS configuration for the service bus client

-username
+jsonBody
- -Kubernetes core/v1.SecretKeySelector - +bool
(Optional) -

Username to use to connect to broker

+

JSONBody specifies that all event body payload coming from this +source will be JSON

-password
+metadata
- -Kubernetes core/v1.SecretKeySelector - +map[string]string
(Optional) -

Password to use to connect to broker

+

Metadata holds the user defined metadata which will passed along the event payload.

-connectionBackoff
+filter
-github.com/argoproj/argo-events/pkg/apis/common.Backoff + +EventSourceFilter +
(Optional) -

Backoff holds parameters applied to connection.

+

Filter

-jsonBody
+fullyQualifiedNamespace
-bool +string
(Optional) -

JSONBody specifies that all event body payload coming from this -source will be JSON

+

FullyQualifiedNamespace is the Service Bus namespace name (ex: myservicebus.servicebus.windows.net). This field is necessary to +access via Azure AD (managed identity) and it is ignored if ConnectionString is set.

+
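A corresponding sketch for AzureServiceBusEventSource, again with placeholder names; connectionString and fullyQualifiedNamespace are alternative access paths:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: EventSource
metadata:
  name: azure-service-bus
spec:
  azureServiceBus:
    example:
      queueName: test
      connectionString:
        name: azure-sb-creds        # hypothetical secret
        key: connection-string
      # Alternatively, for Azure AD (managed identity):
      # fullyQualifiedNamespace: myservicebus.servicebus.windows.net
      jsonBody: true
```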

BitbucketAuth +

+

+(Appears on: +BitbucketEventSource) +

+

+

BitbucketAuth holds the different auth strategies for connecting to Bitbucket

+

+ + + + + + + +
FieldDescription
-tls
+basic
-github.com/argoproj/argo-events/pkg/apis/common.TLSConfig + +BitbucketBasicAuth +
(Optional) -

TLS configuration for the emitter client.

+

Basic is BasicAuth auth strategy.

-metadata
+oauthToken
-map[string]string + +Kubernetes core/v1.SecretKeySelector +
(Optional) -

Metadata holds the user defined metadata which will passed along the event payload.

+

OAuthToken refers to the K8s secret that holds the OAuth Bearer token.

-

EventPersistence +

BitbucketBasicAuth

(Appears on: -CalendarEventSource) +BitbucketAuth)

+

BasicAuth holds the information required to authenticate user via basic auth mechanism

@@ -870,36 +929,40 @@

EventPersistence

-catchup
+username
- -CatchupConfiguration + +Kubernetes core/v1.SecretKeySelector
-

Catchup enables to triggered the missed schedule when eventsource restarts

+

Username refers to the K8s secret that holds the username.

-configMap
+password
- -ConfigMapPersistence + +Kubernetes core/v1.SecretKeySelector
-

ConfigMap holds configmap details for persistence

+

Password refers to the K8s secret that holds the password.

-

EventSource +

BitbucketEventSource

-

EventSource is the definition of a eventsource resource

+(Appears on: +EventSourceSpec) +

+

+

BitbucketEventSource describes the event source for Bitbucket

@@ -911,429 +974,432 @@

EventSource

-metadata
+deleteHookOnFinish
- -Kubernetes meta/v1.ObjectMeta - +bool
-Refer to the Kubernetes API documentation for the fields of the -metadata field. +(Optional) +

DeleteHookOnFinish determines whether to delete the defined Bitbucket hook once the event source is stopped.

-spec
+metadata
- -EventSourceSpec - +map[string]string
-
-
- +(Optional) +

Metadata holds the user defined metadata which will be passed along the event payload.

+ + + +
-eventBusName
+webhook
-string + +WebhookContext +
-

EventBusName references to a EventBus name. By default the value is “default”

+

Webhook refers to the configuration required to run an http server

-template
+auth
- -Template + +BitbucketAuth
-(Optional) -

Template is the pod specification for the event source

+

Auth information required to connect to Bitbucket.

-service
+events
- -Service - +[]string
-(Optional) -

Service is the specifications of the service to expose the event source

+

Events this webhook is subscribed to.

-replica
+owner
-int32 +string
-

DeprecatedReplica is the event source deployment replicas -Deprecated: use replicas instead, will be removed in v1.5

+(Optional) +

DeprecatedOwner is the owner of the repository. +Deprecated: use Repositories instead. Will be unsupported in v1.9

-minio
+projectKey
-map[string]github.com/argoproj/argo-events/pkg/apis/common.S3Artifact +string
-

Minio event sources

+(Optional) +

DeprecatedProjectKey is the key of the project to which the repository relates +Deprecated: use Repositories instead. Will be unsupported in v1.9

-calendar
+repositorySlug
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.CalendarEventSource - +string
-

Calendar event sources

+(Optional) +

DeprecatedRepositorySlug is a URL-friendly version of a repository name, automatically generated by Bitbucket for use in the URL +Deprecated: use Repositories instead. Will be unsupported in v1.9

-file
+repositories
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.FileEventSource + +[]BitbucketRepository
-

File event sources

+(Optional) +

Repositories holds a list of repositories for which integration needs to set up

-resource
+filter
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.ResourceEventSource + +EventSourceFilter
-

Resource event sources

+(Optional) +

Filter

+

BitbucketRepository +

+

+(Appears on: +BitbucketEventSource) +

+

+

+ + - - + + + + + +
-webhook
- - -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WebhookContext - - -
-

Webhook event sources

-
FieldDescription
-amqp
+owner
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AMQPEventSource - +string
-

AMQP event sources

+

Owner is the owner of the repository

-kafka
+repositorySlug
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.KafkaEventSource - +string
-

Kafka event sources

+

RepositorySlug is a URL-friendly version of a repository name, automatically generated by Bitbucket for use in the URL

+

BitbucketServerEventSource +

+

+(Appears on: +EventSourceSpec) +

+

+

BitbucketServerEventSource refers to event-source related to Bitbucket Server events

+

+ + - - + + + + + +
-mqtt
- - -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.MQTTEventSource - - -
-

MQTT event sources

-
FieldDescription
-nats
+webhook
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.NATSEventsSource + +WebhookContext
-

NATS event sources

+

Webhook holds configuration to run a http server.

-sns
+projectKey
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SNSEventSource - +string
-

SNS event sources

+(Optional) +

DeprecatedProjectKey is the key of project for which integration needs to set up. +Deprecated: use Repositories instead. Will be unsupported in v1.8.

-sqs
+repositorySlug
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SQSEventSource - +string
-

SQS event sources

+(Optional) +

DeprecatedRepositorySlug is the slug of the repository for which integration needs to set up. +Deprecated: use Repositories instead. Will be unsupported in v1.8.

-pubSub
+projects
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.PubSubEventSource - +[]string
-

PubSub event sources

+(Optional) +

Projects holds a list of projects for which integration needs to set up, this will add the webhook to all repositories in the project.

-github
+repositories
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GithubEventSource + +[]BitbucketServerRepository
-

Github event sources

+(Optional) +

Repositories holds a list of repositories for which integration needs to set up.

-gitlab
+events
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GitlabEventSource - +[]string
-

Gitlab event sources

+(Optional) +

Events are bitbucket event to listen to. +Refer https://confluence.atlassian.com/bitbucketserver/event-payload-938025882.html

-hdfs
+skipBranchRefsChangedOnOpenPR
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.HDFSEventSource - +bool
-

HDFS event sources

+(Optional) +

SkipBranchRefsChangedOnOpenPR bypasses the event repo:refs_changed for branches whenever there’s an associated open pull request. +This helps in optimizing the event handling process by avoiding unnecessary triggers for branch reference changes that are already part of a pull request under review.

-slack
+accessToken
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SlackEventSource + +Kubernetes core/v1.SecretKeySelector
-

Slack event sources

+

AccessToken is reference to K8s secret which holds the bitbucket api access information.

-storageGrid
+webhookSecret
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.StorageGridEventSource + +Kubernetes core/v1.SecretKeySelector
-

StorageGrid event sources

+

WebhookSecret is reference to K8s secret which holds the bitbucket webhook secret (for HMAC validation).

-azureEventsHub
+bitbucketserverBaseURL
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AzureEventsHubEventSource - +string
-

AzureEventsHub event sources

+

BitbucketServerBaseURL is the base URL for API requests to a custom endpoint.

-stripe
+deleteHookOnFinish
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.StripeEventSource - +bool
-

Stripe event sources

+(Optional) +

DeleteHookOnFinish determines whether to delete the Bitbucket Server hook for the project once the event source is stopped.

-emitter
+metadata
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EmitterEventSource - +map[string]string
-

Emitter event source

+(Optional) +

Metadata holds the user defined metadata which will passed along the event payload.

-redis
+filter
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.RedisEventSource + +EventSourceFilter
-

Redis event source

+(Optional) +

Filter

-nsq
+tls
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.NSQEventSource - +github.com/argoproj/argo-events/pkg/apis/common.TLSConfig
-

NSQ event source

+(Optional) +

TLS configuration for the bitbucketserver client.

-pulsar
+checkInterval
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.PulsarEventSource - +string
-

Pulsar event source

+(Optional) +

CheckInterval is a duration in which to wait before checking that the webhooks exist, e.g. 1s, 30m, 2h… (defaults to 1m)

+

BitbucketServerRepository +

+

+(Appears on: +BitbucketServerEventSource) +

+

+

+ + - - + + + + - -
-generic
- - -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GenericEventSource - - -
-

Generic event source

-
FieldDescription
-replicas
+projectKey
-int32 +string
-

Replicas is the event source deployment replicas

-
+

ProjectKey is the key of project for which integration needs to set up.

-status
+repositorySlug
- -EventSourceStatus - +string
-(Optional) +

RepositorySlug is the slug of the repository for which integration needs to set up.

-

EventSourceSpec +

CalendarEventSource

(Appears on: -EventSource) +EventSourceSpec)

-

EventSourceSpec refers to specification of event-source resource

+

CalendarEventSource describes a time based dependency. One of the fields (schedule, interval, or recurrence) must be passed. +Schedule takes precedence over interval; interval takes precedence over recurrence

@@ -1345,386 +1411,1619 @@

EventSourceSpec

+ + + + + +
-eventBusName
+schedule
string
-

EventBusName references to a EventBus name. By default the value is “default”

+(Optional) +

Schedule is a cron-like expression. For reference, see: https://en.wikipedia.org/wiki/Cron

-template
+interval
- -Template - +string
(Optional) -

Template is the pod specification for the event source

+

Interval is a string that describes an interval duration, e.g. 1s, 30m, 2h…

-service
+exclusionDates
- -Service - +[]string + +
+

ExclusionDates defines the list of DATE-TIME exceptions for recurring events.

+
+timezone
+ +string
(Optional) -

Service is the specifications of the service to expose the event source

+

Timezone in which to run the schedule

-replica
+metadata
-int32 +map[string]string
-

DeprecatedReplica is the event source deployment replicas -Deprecated: use replicas instead, will be removed in v1.5

+(Optional) +

Metadata holds the user defined metadata which will be passed along the event payload.

-minio
+persistence
-map[string]github.com/argoproj/argo-events/pkg/apis/common.S3Artifact + +EventPersistence +
-

Minio event sources

+

Persistence hold the configuration for event persistence

-calendar
+filter
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.CalendarEventSource + +EventSourceFilter
-

Calendar event sources

+(Optional) +

Filter

+
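A hedged CalendarEventSource sketch tying together schedule, timezone, and the persistence/catchup machinery documented next; the schedule and configmap name are illustrative:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: EventSource
metadata:
  name: calendar
spec:
  calendar:
    example:
      schedule: "*/10 * * * *"
      timezone: America/New_York
      persistence:
        catchup:
          enabled: true             # replay schedules missed during restarts
          maxDuration: 1h
        configMap:
          name: calendar-catchup    # hypothetical configmap
          createIfNotExist: true
```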

CatchupConfiguration +

+

+(Appears on: +EventPersistence) +

+

+

+ + + + + + + + + + + + + + + +
FieldDescription
-file
+enabled
+ +bool + +
+

Enabled enables triggering the missed schedule when the eventsource restarts

+
+maxDuration
+ +string + +
+

MaxDuration holds max catchup duration

+
+

ConfigMapPersistence +

+

+(Appears on: +EventPersistence) +

+

+

+ + + + + + + + + + + + + + + + + +
FieldDescription
+name
+ +string + +
+

Name of the configmap

+
+createIfNotExist
+ +bool + +
+

CreateIfNotExist will create the configmap if it doesn’t exist

+
+

EmitterEventSource +

+

+(Appears on: +EventSourceSpec) +

+

+

EmitterEventSource describes the event source for emitter +More info at https://emitter.io/develop/getting-started/

+

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+broker
+ +string + +
+

Broker URI to connect to.

+
+channelKey
+ +string + +
+

ChannelKey refers to the channel key

+
+channelName
+ +string + +
+

ChannelName refers to the channel name

+
+username
+ + +Kubernetes core/v1.SecretKeySelector + + +
+(Optional) +

Username to use to connect to broker

+
+password
+ + +Kubernetes core/v1.SecretKeySelector + + +
+(Optional) +

Password to use to connect to broker

+
+connectionBackoff
+ +github.com/argoproj/argo-events/pkg/apis/common.Backoff + +
+(Optional) +

Backoff holds parameters applied to connection.

+
+jsonBody
+ +bool + +
+(Optional) +

JSONBody specifies that all event body payload coming from this +source will be JSON

+
+tls
+ +github.com/argoproj/argo-events/pkg/apis/common.TLSConfig + +
+(Optional) +

TLS configuration for the emitter client.

+
+metadata
+ +map[string]string + +
+(Optional) +

Metadata holds the user defined metadata which will be passed along the event payload.

+
+filter
+ + +EventSourceFilter + + +
+(Optional) +

Filter

+
+

EventPersistence +

+

+(Appears on: +CalendarEventSource) +

+

+

+ + + + + + + + + + + + + + + + + +
FieldDescription
+catchup
+ + +CatchupConfiguration + + +
+

Catchup enables triggering the missed schedule when the eventsource restarts

+
+configMap
+ + +ConfigMapPersistence + + +
+

ConfigMap holds configmap details for persistence

+
+

EventSource +

+

+

EventSource is the definition of an eventsource resource

+

+ + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+metadata
+ + +Kubernetes meta/v1.ObjectMeta + + +
+Refer to the Kubernetes API documentation for the fields of the +metadata field. +
+spec
+ + +EventSourceSpec + + +
+
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+eventBusName
+ +string + +
+

EventBusName references an EventBus name. By default the value is “default”

+
+template
+ + +Template + + +
+(Optional) +

Template is the pod specification for the event source

+
+service
+ + +Service + + +
+(Optional) +

Service is the specifications of the service to expose the event source

+
+minio
+ +map[string]github.com/argoproj/argo-events/pkg/apis/common.S3Artifact + +
+

Minio event sources

+
+calendar
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.CalendarEventSource + + +
+

Calendar event sources

+
+file
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.FileEventSource + + +
+

File event sources

+
+resource
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.ResourceEventSource + + +
+

Resource event sources

+
+webhook
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WebhookEventSource + + +
+

Webhook event sources

+
+amqp
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AMQPEventSource + + +
+

AMQP event sources

+
+kafka
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.KafkaEventSource + + +
+

Kafka event sources

+
+mqtt
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.MQTTEventSource + + +
+

MQTT event sources

+
+nats
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.NATSEventsSource + + +
+

NATS event sources

+
+sns
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SNSEventSource + + +
+

SNS event sources

+
+sqs
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SQSEventSource + + +
+

SQS event sources

+
+pubSub
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.PubSubEventSource + + +
+

PubSub event sources

+
+github
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GithubEventSource + + +
+

Github event sources

+
+gitlab
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GitlabEventSource + + +
+

Gitlab event sources

+
+hdfs
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.HDFSEventSource + + +
+

HDFS event sources

+
+slack
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SlackEventSource + + +
+

Slack event sources

+
+storageGrid
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.StorageGridEventSource + + +
+

StorageGrid event sources

+
+azureEventsHub
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AzureEventsHubEventSource + + +
+

AzureEventsHub event sources

+
+stripe
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.StripeEventSource + + +
+

Stripe event sources

+
+emitter
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EmitterEventSource + + +
+

Emitter event source

+
+redis
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.RedisEventSource + + +
+

Redis event source

+
+nsq
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.NSQEventSource + + +
+

NSQ event source

+
+pulsar
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.PulsarEventSource + + +
+

Pulsar event source

+
+generic
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GenericEventSource + + +
+

Generic event source

+
+replicas
+ +int32 + +
+

Replicas is the event source deployment replicas

+
+bitbucketserver
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.BitbucketServerEventSource + + +
+

Bitbucket Server event sources

+
+bitbucket
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.BitbucketEventSource + + +
+

Bitbucket event sources

+
+redisStream
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.RedisStreamEventSource + + +
+

Redis stream source

+
+azureServiceBus
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AzureServiceBusEventSource + + +
+

Azure Service Bus event source

+
+azureQueueStorage
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AzureQueueStorageEventSource + + +
+

AzureQueueStorage event source

+
+sftp
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SFTPEventSource + + +
+

SFTP event sources

+
+gerrit
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GerritEventSource + + +
+

Gerrit event source

+
+
+status
+ + +EventSourceStatus + + +
+(Optional) +
+

EventSourceFilter +

+

+(Appears on: +AMQPEventSource, +AzureEventsHubEventSource, +AzureQueueStorageEventSource, +AzureServiceBusEventSource, +BitbucketEventSource, +BitbucketServerEventSource, +CalendarEventSource, +EmitterEventSource, +FileEventSource, +GenericEventSource, +GerritEventSource, +GithubEventSource, +GitlabEventSource, +HDFSEventSource, +KafkaEventSource, +MQTTEventSource, +NATSEventsSource, +NSQEventSource, +PubSubEventSource, +PulsarEventSource, +RedisEventSource, +RedisStreamEventSource, +SFTPEventSource, +SNSEventSource, +SQSEventSource, +SlackEventSource, +WebhookEventSource) +

+

+

+ + + + + + + + + + + + + +
FieldDescription
+expression
+ +string + +
+
+
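EventSourceFilter's single expression field is evaluated against the (JSON) event body of every source type listed above. A hedged webhook sketch, with illustrative endpoint, port, and expression:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: EventSource
metadata:
  name: webhook
spec:
  webhook:
    example:
      endpoint: /example
      method: POST
      port: "12000"
      # Only events whose body satisfies the expression are dispatched.
      filter:
        expression: "body.action == 'opened'"
```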

EventSourceSpec +

+

+(Appears on: +EventSource) +

+

+

EventSourceSpec refers to the specification of an event-source resource

+

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+eventBusName
+ +string + +
+

EventBusName references an EventBus name. By default the value is “default”

+
+template
+ + +Template + + +
+(Optional) +

Template is the pod specification for the event source

+
+service
+ + +Service + + +
+(Optional) +

Service is the specifications of the service to expose the event source

+
+minio
+ +map[string]github.com/argoproj/argo-events/pkg/apis/common.S3Artifact + +
+

Minio event sources

+
+calendar
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.CalendarEventSource + + +
+

Calendar event sources

+
+file
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.FileEventSource + + +
+

File event sources

+
+resource
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.ResourceEventSource + + +
+

Resource event sources

+
+webhook
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WebhookEventSource + + +
+

Webhook event sources

+
+amqp
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AMQPEventSource + + +
+

AMQP event sources

+
+kafka
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.KafkaEventSource + + +
+

Kafka event sources

+
+mqtt
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.MQTTEventSource + + +
+

MQTT event sources

+
+nats
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.NATSEventsSource + + +
+

NATS event sources

+
+sns
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SNSEventSource + + +
+

SNS event sources

+
+sqs
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SQSEventSource + + +
+

SQS event sources

+
+pubSub
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.PubSubEventSource + + +
+

PubSub event sources

+
+github
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GithubEventSource + + +
+

Github event sources

+
+gitlab
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GitlabEventSource + + +
+

Gitlab event sources

+
+hdfs
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.HDFSEventSource + + +
+

HDFS event sources

+
+slack
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SlackEventSource + + +
+

Slack event sources

+
+storageGrid
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.StorageGridEventSource + + +
+

StorageGrid event sources

+
+azureEventsHub
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AzureEventsHubEventSource + + +
+

AzureEventsHub event sources

+
+stripe
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.StripeEventSource + + +
+

Stripe event sources

+
+emitter
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EmitterEventSource + + +
+

Emitter event source

+
+redis
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.RedisEventSource + + +
+

Redis event source

+
+nsq
+ + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.NSQEventSource + + +
+

NSQ event source

+
+pulsar
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.FileEventSource + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.PulsarEventSource
-

File event sources

+

Pulsar event source

-resource
+generic
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.ResourceEventSource + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GenericEventSource
-

Resource event sources

+

Generic event source

-webhook
+replicas
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WebhookContext - +int32
-

Webhook event sources

+

Replicas is the event source deployment replicas

-amqp
+bitbucketserver
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AMQPEventSource + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.BitbucketServerEventSource
-

AMQP event sources

+

Bitbucket Server event sources

-kafka
+bitbucket
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.KafkaEventSource + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.BitbucketEventSource
-

Kafka event sources

+

Bitbucket event sources

-mqtt
+redisStream
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.MQTTEventSource + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.RedisStreamEventSource
-

MQTT event sources

+

Redis stream source

-nats
+azureServiceBus
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.NATSEventsSource + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AzureServiceBusEventSource
-

NATS event sources

+

Azure Service Bus event source

-sns
+azureQueueStorage
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SNSEventSource + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AzureQueueStorageEventSource
-

SNS event sources

+

AzureQueueStorage event source

-sqs
+sftp
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SQSEventSource + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SFTPEventSource
-

SQS event sources

+

SFTP event sources

-pubSub
+gerrit
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.PubSubEventSource + +map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GerritEventSource
-

PubSub event sources

+

Gerrit event source

+
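Putting the spec fields together: a minimal EventSource manifest picks an event bus, optionally sizes the deployment with `replicas`, and populates one or more of the named source maps. A sketch, using a webhook source (names and port are hypothetical):

```yaml
apiVersion: argoproj.io/v1alpha1
kind: EventSource
metadata:
  name: webhook
spec:
  eventBusName: default
  replicas: 1
  webhook:
    example:
      port: "12000"
      endpoint: /example
      method: POST
```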

EventSourceStatus +

+

+(Appears on: +EventSource) +

+

+

EventSourceStatus holds the status of the event-source resource

+

+ + + + + + + + + +
FieldDescription
-github
+Status
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GithubEventSource - +github.com/argoproj/argo-events/pkg/apis/common.Status
-

Github event sources

+

+(Members of Status are embedded into this type.) +

+

FileEventSource +

+

+(Appears on: +EventSourceSpec) +

+

+

FileEventSource describes an event-source for file related events.

+

+ + + + + + + + + +
FieldDescription
-gitlab
+eventType
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GitlabEventSource - +string
-

Gitlab event sources

+

Type of file operations to watch +Refer https://github.com/fsnotify/fsnotify/blob/master/fsnotify.go for more information

-hdfs
+watchPathConfig
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.HDFSEventSource + +WatchPathConfig
-

HDFS event sources

+

WatchPathConfig contains configuration about the file path to watch

-slack
+polling
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SlackEventSource - +bool
-

Slack event sources

+

Use polling instead of inotify

-storageGrid
+metadata
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.StorageGridEventSource - +map[string]string
-

StorageGrid event sources

+(Optional) +

Metadata holds the user defined metadata which will be passed along the event payload.

-azureEventsHub
+filter
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AzureEventsHubEventSource + +EventSourceFilter
-

AzureEventsHub event sources

+(Optional) +

Filter

+
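A minimal `file` source entry watching for newly created files, per the fields above (the directory and file name are hypothetical):

```yaml
file:
  example:
    eventType: CREATE
    polling: false
    watchPathConfig:
      directory: /mnt/test-data/
      path: x.txt
```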

GenericEventSource +

+

+(Appears on: +EventSourceSpec) +

+

+

GenericEventSource refers to a generic event source. It can be used to implement a custom event source.

+

+ + + + + + + +
FieldDescription
-stripe
+url
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.StripeEventSource - +string
-

Stripe event sources

+

URL of the gRPC server that implements the event source.

-emitter
+config
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EmitterEventSource - +string
-

Emitter event source

+

Config is the event source configuration

-redis
+insecure
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.RedisEventSource - +bool
-

Redis event source

+

Insecure determines the type of connection.

-nsq
+jsonBody
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.NSQEventSource - +bool
-

NSQ event source

+(Optional) +

JSONBody specifies that all event body payload coming from this +source will be JSON

-pulsar
+metadata
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.PulsarEventSource - +map[string]string
-

Pulsar event source

+(Optional) +

Metadata holds the user defined metadata which will be passed along the event payload.

-generic
+authSecret
- -map[string]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GenericEventSource + +Kubernetes core/v1.SecretKeySelector
-

Generic event source

+(Optional) +

AuthSecret holds a secret selector that contains a bearer token for authentication

-replicas
+filter
-int32 + +EventSourceFilter +
-

Replicas is the event source deployment replicas

+(Optional) +

Filter

-

EventSourceStatus +
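For illustration, a `generic` entry pointing at a user-provided gRPC server that implements the event-source contract (the address and config payload are hypothetical):

```yaml
generic:
  example:
    insecure: true
    url: "generic-event-source-server.argo-events.svc:8080"
    jsonBody: true
    config: |-
      key1: value1
      key2: value2
```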

GerritEventSource

(Appears on: -EventSource) +EventSourceSpec)

-

EventSourceStatus holds the status of the event-source resource

+

GerritEventSource refers to event-source related to gerrit events

@@ -1736,70 +3035,73 @@

EventSourceStatus

- -
-Status
+webhook
-github.com/argoproj/argo-events/pkg/apis/common.Status + +WebhookContext +
-

-(Members of Status are embedded into this type.) -

+

Webhook holds configuration to run a http server

-

FileEventSource -

-

-(Appears on: -EventSourceSpec) -

-

-

FileEventSource describes an event-source for file related events.

-

- - - - + + + + + + - - @@ -1814,95 +3116,94 @@

FileEventSource

Metadata holds the user defined metadata which will be passed along the event payload.

- -
FieldDescription +hookName
+ +string + +
+

HookName is the name of the webhook

+
+events
+ +[]string + +
+

Events are the gerrit events to listen to. +Refer https://gerrit-review.googlesource.com/Documentation/cmd-stream-events.html#events

+
-eventType
+auth
-string +github.com/argoproj/argo-events/pkg/apis/common.BasicAuth
-

Type of file operations to watch -Refer https://github.com/fsnotify/fsnotify/blob/master/fsnotify.go for more information

+(Optional) +

Auth holds secret selectors for username and password

-watchPathConfig
+gerritBaseURL
- -WatchPathConfig - +string
-

WatchPathConfig contains configuration about the file path to watch

+

GerritBaseURL is the base URL for API requests to a custom endpoint

-polling
+deleteHookOnFinish
bool
-

Use polling instead of inotify

+(Optional) +

DeleteHookOnFinish determines whether to delete the Gerrit hook for the project once the event source is stopped.

-

GenericEventSource -

-

-(Appears on: -EventSourceSpec) -

-

-

GenericEventSource refers to a generic event source. It can be used to implement a custom event source.

-

- - - - - - - - + +
FieldDescription
-url
+projects
-string +[]string
-

URL of the gRPC server that implements the event source.

+

List of project namespace paths like “whynowy/test”.

-config
+sslVerify
-string +bool
-

Config is the event source configuration

+(Optional) +

SslVerify enables ssl verification

-insecure
+filter
-bool + +EventSourceFilter +
-

Insecure determines the type of connection.

+(Optional) +

Filter

+
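A sketch wiring the Gerrit fields above together (host, project, and secret names are hypothetical):

```yaml
gerrit:
  example:
    hookName: gerrit-webhook
    events:
      - patchset-created
    projects:
      - "my-namespace/my-project"
    gerritBaseURL: http://gerrit.example.com:8080
    auth:
      username:
        name: gerrit-secret
        key: username
      password:
        name: gerrit-secret
        key: password
    webhook:
      endpoint: /gerrit
      port: "12000"
      method: POST
    deleteHookOnFinish: true
```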

GithubAppCreds +

+

+(Appears on: +GithubEventSource) +

+

+

+ + + + + + + + @@ -1932,6 +3233,7 @@

GithubEventSource

@@ -1957,6 +3259,7 @@

GithubEventSource

@@ -1969,6 +3272,7 @@

GithubEventSource

@@ -1981,13 +3285,14 @@

GithubEventSource

+ + + + + + + + + + + + + + + + @@ -2149,7 +3508,9 @@

GitlabEventSource

@@ -2168,13 +3529,13 @@

GitlabEventSource

@@ -2224,6 +3585,58 @@

GitlabEventSource

Metadata holds the user defined metadata which will be passed along the event payload.

+ + + + + + + + + + + + + + + +
FieldDescription
-jsonBody
+privateKey
-bool + +Kubernetes core/v1.SecretKeySelector +
-(Optional) -

JSONBody specifies that all event body payload coming from this -source will be JSON

+

PrivateKey refers to a K8s secret containing the GitHub app private key

-metadata
+appID
-map[string]string +int64
-(Optional) -

Metadata holds the user defined metadata which will passed along the event payload.

+

AppID refers to the GitHub App ID for the application you created

-authSecret
+installationID
- -Kubernetes core/v1.SecretKeySelector - +int64
-(Optional) -

AuthSecret holds a secret selector that contains a bearer token for authentication

+

InstallationID refers to the Installation ID of the GitHub app you created and installed

+(Optional)

Id is the webhook’s id Deprecated: This is not used at all, will be removed in v1.6

+(Optional)

DeprecatedOwner refers to GitHub owner name i.e. argoproj Deprecated: use Repositories instead. Will be unsupported in v 1.6

+(Optional)

DeprecatedRepository refers to GitHub repo name i.e. argo-events Deprecated: use Repositories instead. Will be unsupported in v 1.6

+

Events refer to Github events to which the event source will subscribe

apiToken
- + Kubernetes core/v1.SecretKeySelector @@ -2001,7 +3306,7 @@

GithubEventSource

webhookSecret
- + Kubernetes core/v1.SecretKeySelector @@ -2106,7 +3411,61 @@

GithubEventSource

Repositories holds the information of repositories, which uses repo owner as the key, -and list of repo names as the value

+and list of repo names as the value. Not required if Organizations is set.

+
+organizations
+ +[]string + +
+

Organizations holds the names of organizations (used for organization level webhooks). Not required if Repositories is set.

+
+githubApp
+ + +GithubAppCreds + + +
+(Optional) +

GitHubApp holds the GitHub app credentials

+
+filter
+ + +EventSourceFilter + + +
+(Optional) +

Filter

+
+payloadEnrichment
+ + +PayloadEnrichmentFlags + + +
+(Optional) +

PayloadEnrichment holds flags that determine whether to enrich GitHub’s original payload with +additional information.
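Tying the GitHub fields together, a sketch using the newer `repositories` form alongside a webhook (owner, repo, and secret names are hypothetical):

```yaml
github:
  example:
    repositories:
      - owner: my-org
        names:
          - my-repo
    events:
      - push
    webhook:
      endpoint: /push
      port: "12000"
      method: POST
    apiToken:
      name: github-access
      key: token
    webhookSecret:
      name: github-access
      key: secret
    insecure: false
    active: true
    contentType: json
```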

-

ProjectID is the id of project for which integration needs to setup

+(Optional) +

DeprecatedProjectID is the id of project for which integration needs to setup +Deprecated: use Projects instead. Will be unsupported in v 1.7

accessToken
- + Kubernetes core/v1.SecretKeySelector
-

AccessToken is reference to k8 secret which holds the gitlab api access information

+

AccessToken references the K8s secret which holds the gitlab api access information

+projects
+ +[]string + +
+(Optional) +

List of project IDs or project namespace paths like “whynowy/test”. Projects and groups cannot be empty at the same time.

+
+secretToken
+ + +Kubernetes core/v1.SecretKeySelector + + +
+

SecretToken references to k8 secret which holds the Secret Token used by webhook config

+
+filter
+ + +EventSourceFilter + + +
+(Optional) +

Filter

+
+groups
+ +[]string + +
+(Optional) +

List of group IDs or group name like “test”. +Group level hook available in Premium and Ultimate Gitlab.

+
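A sketch of a `gitlab` entry using the `projects` list (project IDs, URLs, and secret names are hypothetical):

```yaml
gitlab:
  example:
    projects:
      - "1001"
    events:
      - PushEvents
    gitlabBaseURL: https://gitlab.com
    accessToken:
      name: gitlab-access
      key: token
    webhook:
      endpoint: /push
      port: "12000"
      url: https://my-cluster.example.com
```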

HDFSEventSource @@ -2306,7 +3719,7 @@

HDFSEventSource krbCCacheSecret
- + Kubernetes core/v1.SecretKeySelector @@ -2320,7 +3733,7 @@

HDFSEventSource krbKeytabSecret
- + Kubernetes core/v1.SecretKeySelector @@ -2358,7 +3771,7 @@

HDFSEventSource krbConfigConfigMap
- + Kubernetes core/v1.ConfigMapKeySelector @@ -2392,6 +3805,20 @@

HDFSEventSource

Metadata holds the user defined metadata which will passed along the event payload.

+ + +filter
+ + +EventSourceFilter + + + + +(Optional) +

Filter

+ +

KafkaConsumerGroup @@ -2483,6 +3910,7 @@

KafkaEventSource +(Optional)

Partition name

@@ -2595,6 +4023,39 @@

KafkaEventSource

SASL configuration for the kafka client

+ + +filter
+ + +EventSourceFilter + + + + +(Optional) +

Filter

+ + + + +config
+ +string + + + +(Optional) +

Yaml format Sarama config for Kafka connection. +It follows the struct of sarama.Config. See https://github.com/IBM/sarama/blob/main/config.go +e.g.

+

+consumer:
+  fetch:
+    min: 1
+net:
+  MaxOpenRequests: 5

+ +
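A sketch of a `kafka` entry, including the optional Sarama `config` block described above (broker address, topic, and group name are hypothetical):

```yaml
kafka:
  example:
    url: kafka.argo-events:9092
    topic: topic-2
    partition: "1"
    jsonBody: true
    consumerGroup:
      groupName: test-group
    config: |
      consumer:
        fetch:
          min: 1
      net:
        MaxOpenRequests: 5
```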

MQTTEventSource @@ -2695,6 +4156,32 @@

MQTTEventSource

Metadata holds the user defined metadata which will be passed along the event payload.

+ + +filter
+ + +EventSourceFilter + + + + +(Optional) +

Filter

+ + + + +auth
+ +github.com/argoproj/argo-events/pkg/apis/common.BasicAuth + + + +(Optional) +

Auth holds secret selectors for username and password

+ +
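A sketch of an `mqtt` entry using the newly added basic `auth` block (broker address and secret names are hypothetical):

```yaml
mqtt:
  example:
    url: tcp://mqtt.argo-events:1883
    topic: foo
    clientId: "argo-events-1"
    jsonBody: true
    auth:
      username:
        name: mqtt-secret
        key: username
      password:
        name: mqtt-secret
        key: password
```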

NATSAuth @@ -2730,7 +4217,7 @@

NATSAuth token
- + Kubernetes core/v1.SecretKeySelector @@ -2744,7 +4231,7 @@

NATSAuth nkey
- + Kubernetes core/v1.SecretKeySelector @@ -2758,7 +4245,7 @@

NATSAuth credential
- + Kubernetes core/v1.SecretKeySelector @@ -2868,7 +4355,34 @@

NATSEventsSource (Optional) -

Auth information

+

Auth information

+ + + + +filter
+ + +EventSourceFilter + + + + +(Optional) +

Filter

+ + + + +queue
+ +string + + + +(Optional) +

Queue is the name of the queue group to subscribe as, if specified. Uses QueueSubscribe +logic to subscribe as a queue group. If the queue is empty, uses default Subscribe logic.

@@ -2973,6 +4487,20 @@

NSQEventSource

Metadata holds the user defined metadata which will be passed along the event payload.

+ + +filter
+ + +EventSourceFilter + + + + +(Optional) +

Filter

+ +

OwnedRepositories @@ -2999,7 +4527,7 @@

OwnedRepositories -

Orgnization or user name

+

Organization or user name

@@ -3015,6 +4543,37 @@

OwnedRepositories +

PayloadEnrichmentFlags +

+

+(Appears on: +GithubEventSource) +

+

+

+ + + + + + + + + + + + + +
FieldDescription
+fetchPROnPRCommentAdded
+ +bool + +
+(Optional) +

FetchPROnPRCommentAdded determines whether to enrich the payload provided by GitHub +on “pull request comment added” events, with the full pull request info

+

PubSubEventSource

@@ -3093,7 +4652,7 @@

PubSubEventSource credentialSecret
- + Kubernetes core/v1.SecretKeySelector @@ -3132,26 +4691,28 @@

PubSubEventSource -credentialsFile
+metadata
-string +map[string]string -

CredentialsFile is the file that contains credentials to authenticate for GCP -Deprecated: will be removed in v1.5, use CredentialSecret instead

+(Optional) +

Metadata holds the user defined metadata which will passed along the event payload.

-metadata
+filter
-map[string]string + +EventSourceFilter + (Optional) -

Metadata holds the user defined metadata which will passed along the event payload.

+

Filter

@@ -3213,7 +4774,7 @@

PulsarEventSource tlsTrustCertsSecret
- + Kubernetes core/v1.SecretKeySelector @@ -3296,6 +4857,64 @@

PulsarEventSource

Metadata holds the user defined metadata which will be passed along the event payload.

+ + +authTokenSecret
+ + +Kubernetes core/v1.SecretKeySelector + + + + +(Optional) +

Authentication token for the pulsar client. +Either token or athenz can be set to use auth.

+ + + + +filter
+ + +EventSourceFilter + + + + +(Optional) +

Filter

+ + + + +authAthenzParams
+ +map[string]string + + + +(Optional) +

Authentication athenz parameters for the pulsar client. +Refer https://github.com/apache/pulsar-client-go/blob/master/pulsar/auth/athenz.go +Either token or athenz can be set to use auth.

+ + + + +authAthenzSecret
+ + +Kubernetes core/v1.SecretKeySelector + + + + +(Optional) +

Authentication athenz privateKey secret for the pulsar client. +AuthAthenzSecret must be set if AuthAthenzParams is used.

+ +

RedisEventSource @@ -3331,7 +4950,7 @@

RedisEventSource password
- + Kubernetes core/v1.SecretKeySelector @@ -3367,36 +4986,220 @@

RedisEventSource -channels
+channels
+ +[]string + + + + + + + +tls
+ +github.com/argoproj/argo-events/pkg/apis/common.TLSConfig + + + +(Optional) +

TLS configuration for the redis client.

+ + + + +metadata
+ +map[string]string + + + +(Optional) +

Metadata holds the user defined metadata which will be passed along the event payload.

+ + + + +filter
+ + +EventSourceFilter + + + + +(Optional) +

Filter

+ + + + +jsonBody
+ +bool + + + +(Optional) +

JSONBody specifies that all event body payload coming from this +source will be JSON

+ + + + +username
+ +string + + + +(Optional) +

Username required for ACL style authentication if any.

+ + + + +

RedisStreamEventSource +

+

+(Appears on: +EventSourceSpec) +

+

+

RedisStreamEventSource describes an event source for +Redis streams (https://redis.io/topics/streams-intro)

+

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -3448,7 +5251,7 @@

ResourceEventSource

+ + + +
FieldDescription
+hostAddress
+ +string + +
+

HostAddress refers to the address of the Redis host/server (master instance)

+
+password
+ + +Kubernetes core/v1.SecretKeySelector + + +
+(Optional) +

Password required for authentication if any.

+
+db
+ +int32 + +
+(Optional) +

DB to use. If not specified, default DB 0 will be used.

+
+streams
+ +[]string + +
+

Streams to look for entries. XREADGROUP is used on all streams using a single consumer group.

+
+maxMsgCountPerRead
+ +int32 + +
+(Optional) +

MaxMsgCountPerRead holds the maximum number of messages per stream that will be read in each XREADGROUP of all streams +Example: if there are 2 streams and MaxMsgCountPerRead=10, then each XREADGROUP may read up to a total of 20 messages. +Same as COUNT option in XREADGROUP(https://redis.io/topics/streams-intro). Defaults to 10

+
+consumerGroup
+ +string + +
+(Optional) +

ConsumerGroup refers to the Redis stream consumer group that will be +created on all redis streams. Messages are read through this group. Defaults to ‘argo-events-cg’

+
+tls
+ +github.com/argoproj/argo-events/pkg/apis/common.TLSConfig + +
+(Optional) +

TLS configuration for the redis client.

+
+metadata
-[]string +map[string]string
+(Optional) +

Metadata holds the user defined metadata which will be passed along the event payload.

-tls
+filter
-github.com/argoproj/argo-events/pkg/apis/common.TLSConfig + +EventSourceFilter +
(Optional) -

TLS configuration for the redis client.

+

Filter

-metadata
+username
-map[string]string +string
(Optional) -

Metadata holds the user defined metadata which will be passed along the event payload.

+

Username required for ACL style authentication if any.
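Combining the Redis-stream fields, a sketch (host, stream names, and secret names are hypothetical):

```yaml
redisStream:
  example:
    hostAddress: redis.argo-events.svc:6379
    db: 0
    streams:
      - FOO
      - BAR
    consumerGroup: argo-events-cg
    maxMsgCountPerRead: 10
    password:
      name: redis-secret
      key: password
```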

GroupVersionResource
- + Kubernetes meta/v1.GroupVersionResource @@ -3486,6 +5289,17 @@

ResourceEventSource

Metadata holds the user defined metadata which will be passed along the event payload.

+cluster
+ +string + +
+

Cluster from which events will be listened to

+

ResourceEventType @@ -3504,7 +5318,7 @@

ResourceFilter ResourceEventSource)

-

ResourceFilter contains K8 ObjectMeta information to further filter resource event objects

+

ResourceFilter contains K8s ObjectMeta information to further filter resource event objects

@@ -3538,7 +5352,11 @@

ResourceFilter

@@ -3563,7 +5381,7 @@

ResourceFilter

(Optional)

Labels provide listing options to K8s API to watch resource/s. -Refer https://kubernetes.io/docs/concepts/overview/working-with-objects/label-selectors/ for more info.

+Refer https://kubernetes.io/docs/concepts/overview/working-with-objects/label-selectors/ for more info. +Unlike K8s field selector, multiple values are passed as comma separated values instead of list of values. +Eg: value: value1,value2. +Same as K8s label selector, operator “=”, “==”, “!=”, “exists”, “!”, “notin”, “in”, “gt” and “lt” +are supported

createdBy
- + Kubernetes meta/v1.Time @@ -3587,6 +5405,141 @@

ResourceFilter

+

SFTPEventSource +

+

+(Appears on: +EventSourceSpec) +

+

+

SFTPEventSource describes an event-source for sftp related events.

+

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+eventType
+ +string + +
+

Type of file operations to watch +Refer https://github.com/fsnotify/fsnotify/blob/master/fsnotify.go for more information

+
+watchPathConfig
+ + +WatchPathConfig + + +
+

WatchPathConfig contains configuration about the file path to watch

+
+username
+ + +Kubernetes core/v1.SecretKeySelector + + +
+

Username required for authentication if any.

+
+password
+ + +Kubernetes core/v1.SecretKeySelector + + +
+

Password required for authentication if any.

+
+sshKeySecret
+ + +Kubernetes core/v1.SecretKeySelector + + +
+

SSHKeySecret refers to the secret that contains SSH key. Key needs to contain private key and public key.

+
+address
+ + +Kubernetes core/v1.SecretKeySelector + + +
+

Address is the sftp address.

+
+metadata
+ +map[string]string + +
+(Optional) +

Metadata holds the user defined metadata which will be passed along the event payload.

+
+filter
+ + +EventSourceFilter + + +
+(Optional) +

Filter

+
+pollIntervalDuration
+ +string + +
+(Optional) +

PollIntervalDuration is the interval at which to poll the SFTP server; +defaults to 10 seconds

+
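A sketch of an `sftp` entry, where every connection parameter is read from a secret per the table above (secret names and paths are hypothetical):

```yaml
sftp:
  example:
    eventType: CREATE
    pollIntervalDuration: 10s
    watchPathConfig:
      directory: /incoming/
      path: data.csv
    username:
      name: sftp-secret
      key: username
    password:
      name: sftp-secret
      key: password
    address:
      name: sftp-secret
      key: address
```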

SNSEventSource

@@ -3632,26 +5585,26 @@

SNSEventSource accessKey
- + Kubernetes core/v1.SecretKeySelector -

AccessKey refers K8 secret containing aws access key

+

AccessKey refers K8s secret containing aws access key

secretKey
- + Kubernetes core/v1.SecretKeySelector -

SecretKey refers K8 secret containing aws secret key

+

SecretKey refers K8s secret containing aws secret key

@@ -3701,6 +5654,32 @@

SNSEventSource

ValidateSignature is boolean that can be set to true for SNS signature verification

+ + +filter
+ + +EventSourceFilter + + + + +(Optional) +

Filter

+ + + + +endpoint
+ +string + + + +(Optional) +

Endpoint configures connection to a specific SNS endpoint instead of Amazon’s servers

+ +

SQSEventSource @@ -3724,26 +5703,26 @@

SQSEventSource accessKey
- + Kubernetes core/v1.SecretKeySelector -

AccessKey refers K8 secret containing aws access key

+

AccessKey refers K8s secret containing aws access key

secretKey
- + Kubernetes core/v1.SecretKeySelector -

SecretKey refers K8 secret containing aws secret key

+

SecretKey refers K8s secret containing aws secret key

@@ -3829,6 +5808,60 @@

SQSEventSource

Metadata holds the user defined metadata which will be passed along the event payload.

+ + +dlq
+ +bool + + + +(Optional) +

DLQ specifies if a dead-letter queue is configured for messages that can’t be processed successfully. +If set to true, messages with an invalid payload won’t be acknowledged, allowing them to be forwarded to the dead-letter queue. +The default value is false.

+ + + + +filter
+ + +EventSourceFilter + + + + +(Optional) +

Filter

+ + + + +endpoint
+ +string + + + +(Optional) +

Endpoint configures connection to a specific SQS endpoint instead of Amazon’s servers

+ + + + +sessionToken
+ + +Kubernetes core/v1.SecretKeySelector + + + + +(Optional) +

SessionToken refers to K8s secret containing AWS temporary credentials(STS) session token

+ +
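A sketch of an `sqs` entry exercising the newly added `dlq` option alongside the existing fields (queue, region, and secret names are hypothetical):

```yaml
sqs:
  example:
    region: us-east-1
    queue: test-queue
    waitTimeSeconds: 20
    jsonBody: true
    dlq: true
    accessKey:
      name: aws-secret
      key: accesskey
    secretKey:
      name: aws-secret
      key: secretkey
```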

Selector @@ -3868,7 +5901,7 @@

Selector (Optional) -

Supported operations like ==, !=, <=, >= etc. +

Supported operations like ==, != etc. Defaults to ==. Refer https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors for more info.

@@ -3907,7 +5940,7 @@

Service ports
- + []Kubernetes core/v1.ServicePort @@ -3957,7 +5990,7 @@

SlackEventSource signingSecret
- + Kubernetes core/v1.SecretKeySelector @@ -3970,7 +6003,7 @@

SlackEventSource token
- + Kubernetes core/v1.SecretKeySelector @@ -4004,6 +6037,20 @@

SlackEventSource

Metadata holds the user defined metadata which will be passed along the event payload.

+ + +filter
+ + +EventSourceFilter + + + + +(Optional) +

Filter

+ +

StorageGridEventSource @@ -4098,7 +6145,7 @@

StorageGridEventSource authToken
- + Kubernetes core/v1.SecretKeySelector @@ -4218,7 +6265,7 @@

StripeEventSource apiKey
- + Kubernetes core/v1.SecretKeySelector @@ -4300,7 +6347,7 @@

Template container
- + Kubernetes core/v1.Container @@ -4314,7 +6361,7 @@

Template volumes
- + []Kubernetes core/v1.Volume @@ -4328,7 +6375,7 @@

Template securityContext
- + Kubernetes core/v1.PodSecurityContext @@ -4343,7 +6390,7 @@

Template affinity
- + Kubernetes core/v1.Affinity @@ -4357,7 +6404,7 @@

Template tolerations
- + []Kubernetes core/v1.Toleration @@ -4385,7 +6432,7 @@

Template imagePullSecrets
- + []Kubernetes core/v1.LocalObjectReference @@ -4440,7 +6487,8 @@

WatchPathConfig

(Appears on: FileEventSource, -HDFSEventSource) +HDFSEventSource, +SFTPEventSource)

@@ -4491,13 +6539,16 @@

WebhookContext

(Appears on: -EventSourceSpec, +BitbucketEventSource, +BitbucketServerEventSource, +GerritEventSource, GithubEventSource, GitlabEventSource, SNSEventSource, SlackEventSource, StorageGridEventSource, -StripeEventSource) +StripeEventSource, +WebhookEventSource)

WebhookContext holds a general purpose REST API context

@@ -4559,7 +6610,7 @@

WebhookContext serverCertSecret
- + Kubernetes core/v1.SecretKeySelector @@ -4572,7 +6623,7 @@

WebhookContext serverKeySecret
- + Kubernetes core/v1.SecretKeySelector @@ -4597,7 +6648,7 @@

WebhookContext authSecret
- + Kubernetes core/v1.SecretKeySelector @@ -4609,24 +6660,64 @@

WebhookContext -serverCertPath
+maxPayloadSize
-string +int64 + + + +(Optional) +

MaxPayloadSize is the maximum webhook payload size that the server will accept. +Requests exceeding that limit will be rejected with “request too large” response. +Default value: 1048576 (1MB).

+ + + + +
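For reference, the webhook context appears wherever an event source exposes an HTTP endpoint; a minimal sketch with the new `maxPayloadSize` cap (port, endpoint, and the size value are illustrative):

```yaml
webhook:
  example:
    port: "12000"
    endpoint: /example
    method: POST
    maxPayloadSize: 1048576
```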

WebhookEventSource +

+

+(Appears on: +EventSourceSpec) +

+

+

WebhookEventSource describes an HTTP based EventSource

+

+ + + + + + + + + + diff --git a/api/event-source.md b/api/event-source.md index 0dbe307170..e17dfd860e 100644 --- a/api/event-source.md +++ b/api/event-source.md @@ -226,7 +226,7 @@ AMQPExchangeDeclareConfig

ExchangeDeclare holds the configuration for the exchange on the server For more information, visit -https://godoc.org/github.com/streadway/amqp\#Channel.ExchangeDeclare +https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.ExchangeDeclare

@@ -243,7 +243,7 @@ QueueDeclare holds the configuration of a queue to hold messages and deliver to consumers. Declaring creates a queue if it doesn’t already exist, or ensures that an existing queue matches the same parameters For more information, visit -https://godoc.org/github.com/streadway/amqp\#Channel.QueueDeclare +https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.QueueDeclare

@@ -260,7 +260,7 @@ QueueBind holds the configuration that binds an exchange to a queue so that publishings to the exchange will be routed to the queue when the publishing routing key matches the binding routing key For more information, visit -https://godoc.org/github.com/streadway/amqp\#Channel.QueueBind +https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.QueueBind

@@ -275,7 +275,7 @@ information, visit

Consume holds the configuration to immediately starts delivering queued messages For more information, visit -https://godoc.org/github.com/streadway/amqp\#Channel.Consume +https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.Consume

@@ -291,6 +291,31 @@ Auth hosts secret selectors for username and password

+ + + + + + + +
FieldDescription
+WebhookContext
+ + +WebhookContext +
-

DeprecatedServerCertPath refers the file that contains the cert.

+

+(Members of WebhookContext are embedded into this type.) +

-serverKeyPath
+filter
-string + +EventSourceFilter +
-

DeprecatedServerKeyPath refers the file that contains private key

+(Optional) +

Filter

+urlSecret
+ +Kubernetes core/v1.SecretKeySelector +
+

+URLSecret is secret reference for rabbitmq service URL +

+
+filter
+ EventSourceFilter + +
+(Optional) +

+Filter +

+

@@ -488,6 +513,18 @@ NowWait when true, the queue assumes to be declared on the server

+ + +arguments
string + + +(Optional) +

+Arguments of a queue (also known as “x-arguments”) used for optional +features and plugins +

+ +

@@ -530,7 +567,7 @@ FQDN of the EventHubs namespace you created More info at sharedAccessKeyName
- + Kubernetes core/v1.SecretKeySelector @@ -543,7 +580,7 @@ keys sharedAccessKey
- + Kubernetes core/v1.SecretKeySelector @@ -574,10 +611,23 @@ event payload.

+ + +filter
+ EventSourceFilter + + + +(Optional) +

+Filter +

+ + -

-CalendarEventSource +

+AzureQueueStorageEventSource

(Appears on: @@ -585,9 +635,9 @@ CalendarEventSource

-CalendarEventSource describes a time based dependency. One of the fields -(schedule, interval, or recurrence) must be passed. Schedule takes -precedence over interval; interval takes precedence over recurrence +AzureQueueStorageEventSource describes the event source for azure queue +storage more info at +https://learn.microsoft.com/en-us/azure/storage/queues/

@@ -604,54 +654,65 @@ Description @@ -669,70 +730,57 @@ event payload. - -
-schedule
string +storageAccountName
string
+(Optional)

-Schedule is a cron-like expression. For reference, see: -https://en.wikipedia.org/wiki/Cron +StorageAccountName is the name of the storage account where the queue +is. This field is necessary to access via Azure AD (managed identity) +and it is ignored if ConnectionString is set.

-interval
string +connectionString
+ +Kubernetes core/v1.SecretKeySelector
+(Optional)

-Interval is a string that describes an interval duration, e.g. 1s, 30m, -2h… +ConnectionString is the connection string to access Azure Queue Storage. +If this field is not provided it will try to access via Azure AD with +StorageAccountName.

-exclusionDates
\[\]string +queueName
string
+

+QueueName is the name of the queue +

-timezone
string +jsonBody
bool
(Optional)

-Timezone in which to run the schedule +JSONBody specifies that all event body payload coming from this source +will be JSON

-userPayload
encoding/json.RawMessage +dlq
bool
(Optional)

-UserPayload will be sent to sensor as extra data once the event is -triggered Deprecated: will be removed in v1.5. Please use Metadata -instead. +DLQ specifies if a dead-letter queue is configured for messages that +can’t be processed successfully. If set to true, messages with an invalid +payload won’t be acknowledged, allowing them to be forwarded to the +dead-letter queue. The default value is false.

-persistence
- EventPersistence - +filter
+ EventSourceFilter +
+(Optional)

-Persistence hold the configuration for event persistence +Filter

-

-CatchupConfiguration -

-

-(Appears on: -EventPersistence) -

-

-

- - - - - - - -
-Field - -Description -
-enabled
bool +decodeMessage
bool
+(Optional)

-Enabled enables to triggered the missed schedule when eventsource -restarts +DecodeMessage specifies if all the messages should be base64 decoded. If +set to true the decoding is done before the evaluation of JSONBody

-maxDuration
string +waitTimeInSeconds
int32
+(Optional)

-MaxDuration holds max catchup duration +WaitTimeInSeconds is the duration (in seconds) for which the event +source waits between empty results from the queue. The default value is +3 seconds.

-

-ConfigMapPersistence +
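A sketch of an `azureQueueStorage` entry using a connection-string secret (queue and secret names are hypothetical):

```yaml
azureQueueStorage:
  example:
    queueName: test-queue
    jsonBody: true
    dlq: true
    waitTimeInSeconds: 3
    connectionString:
      name: azure-secret
      key: connectionstring
```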

+AzureServiceBusEventSource

(Appears on: -EventPersistence) +EventSourceSpec)

+

+AzureServiceBusEventSource describes the event source for azure service +bus More info at +https://docs.microsoft.com/en-us/azure/service-bus-messaging/ +

@@ -748,165 +796,179 @@ Description - -
-name
string +connectionString
+ +Kubernetes core/v1.SecretKeySelector
+(Optional)

-Name of the configmap +ConnectionString is the connection string for the Azure Service Bus. If +this field is not provided it will try to access via Azure AD with +DefaultAzureCredential and FullyQualifiedNamespace.

-createIfNotExist
bool +queueName
string

-CreateIfNotExist will create configmap if it doesn’t exists +QueueName is the name of the Azure Service Bus Queue

-

-EmitterEventSource -

-

-(Appears on: -EventSourceSpec) -

-

-

-EmitterEventSource describes the event source for emitter More info at -https://emitter.io/develop/getting-started/ -

-

- - - - - - - - + +
-Field - -Description -
-broker
string +topicName
string

-Broker URI to connect to. +TopicName is the name of the Azure Service Bus Topic

-channelKey
string +subscriptionName
string

-ChannelKey refers to the channel key +SubscriptionName is the name of the Azure Service Bus Topic Subscription

-channelName
string +tls
+github.com/argoproj/argo-events/pkg/apis/common.TLSConfig
+(Optional)

-ChannelName refers to the channel name +TLS configuration for the service bus client

-username
- -Kubernetes core/v1.SecretKeySelector +jsonBody
bool
(Optional)

-Username to use to connect to broker +JSONBody specifies that all event body payload coming from this source +will be JSON

-password
- -Kubernetes core/v1.SecretKeySelector +metadata
map\[string\]string
(Optional)

-Password to use to connect to broker +Metadata holds the user defined metadata which will passed along the +event payload.

-connectionBackoff
-github.com/argoproj/argo-events/pkg/apis/common.Backoff +filter
+ EventSourceFilter +
(Optional)

-Backoff holds parameters applied to connection. +Filter

-jsonBody
bool +fullyQualifiedNamespace
string
(Optional)

-JSONBody specifies that all event body payload coming from this source -will be JSON +FullyQualifiedNamespace is the Service Bus namespace name (ex: +myservicebus.servicebus.windows.net). This field is necessary to access +via Azure AD (managed identity) and it is ignored if ConnectionString is +set.

+
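A sketch of an `azureServiceBus` entry reading from a queue (queue and secret names are hypothetical):

```yaml
azureServiceBus:
  example:
    queueName: test
    jsonBody: true
    connectionString:
      name: azure-secret
      key: connectionstring
```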

+BitbucketAuth +

+

+(Appears on: +BitbucketEventSource) +

+

+

+BitbucketAuth holds the different auth strategies for connecting to +Bitbucket +

+

+ + + + + + + +
+Field + +Description +
-tls
-github.com/argoproj/argo-events/pkg/apis/common.TLSConfig +basic
+ BitbucketBasicAuth +
(Optional)

-TLS configuration for the emitter client. +Basic is BasicAuth auth strategy.

-metadata
map\[string\]string +oauthToken
+ +Kubernetes core/v1.SecretKeySelector
(Optional)

-Metadata holds the user defined metadata which will passed along the -event payload. +OAuthToken refers to the K8s secret that holds the OAuth Bearer token.

-

-EventPersistence +

+BitbucketBasicAuth

(Appears on: -CalendarEventSource) +BitbucketAuth)

+

+BasicAuth holds the information required to authenticate user via basic +auth mechanism +

@@ -922,37 +984,40 @@ Description
-catchup
- -CatchupConfiguration +username
+ +Kubernetes core/v1.SecretKeySelector

-Catchup enables to triggered the missed schedule when eventsource -restarts +Username refers to the K8s secret that holds the username.

-configMap
- -ConfigMapPersistence +password
+ +Kubernetes core/v1.SecretKeySelector

-ConfigMap holds configmap details for persistence +Password refers to the K8s secret that holds the password.

-

-EventSource +

+BitbucketEventSource

+(Appears on: +EventSourceSpec) +

+

-EventSource is the definition of a eventsource resource +BitbucketEventSource describes the event source for Bitbucket

@@ -969,416 +1034,444 @@ Description +

+

+
-metadata
- -Kubernetes meta/v1.ObjectMeta +deleteHookOnFinish
bool
-Refer to the Kubernetes API documentation for the fields of the -metadata field. +(Optional) +

+DeleteHookOnFinish determines whether to delete the defined Bitbucket +hook once the event source is stopped. +

-spec
- EventSourceSpec - +metadata
map\[string\]string
-

- +(Optional) +

+Metadata holds the user defined metadata which will be passed along the +event payload. +

+ + - - - +
-eventBusName
string +webhook
+ WebhookContext +

-EventBusName references to a EventBus name. By default the value is -“default” +Webhook refers to the configuration required to run an http server

-template
- Template +auth
+ BitbucketAuth
-(Optional)

-Template is the pod specification for the event source +Auth information required to connect to Bitbucket.

-service
-Service +events
\[\]string
-(Optional)

-Service is the specifications of the service to expose the event source +Events this webhook is subscribed to.

-replica
int32 +owner
string
+(Optional)

-DeprecatedReplica is the event source deployment replicas Deprecated: -use replicas instead, will be removed in v1.5 +DeprecatedOwner is the owner of the repository. Deprecated: use +Repositories instead. Will be unsupported in v1.9

-minio
-map\[string\]github.com/argoproj/argo-events/pkg/apis/common.S3Artifact - +projectKey
string
+(Optional)

-Minio event sources +DeprecatedProjectKey is the key of the project to which the repository +relates Deprecated: use Repositories instead. Will be unsupported in +v1.9

-calendar
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.CalendarEventSource - +repositorySlug
string
+(Optional)

-Calendar event sources +DeprecatedRepositorySlug is a URL-friendly version of a repository name, +automatically generated by Bitbucket for use in the URL Deprecated: use +Repositories instead. Will be unsupported in v1.9

-file
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.FileEventSource - +repositories
+ +\[\]BitbucketRepository
+(Optional)

-File event sources +Repositories holds a list of repositories for which integration needs to +set up

-resource
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.ResourceEventSource +filter
+
EventSourceFilter
+(Optional)

-Resource event sources +Filter

-webhook
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WebhookContext - -
+
+
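A sketch of a `bitbucket` entry using OAuth-token auth and the `repositories` list (owner, slug, URLs, and secret names are hypothetical):

```yaml
bitbucket:
  example:
    repositories:
      - owner: my-org
        repositorySlug: my-repo
    events:
      - repo:push
    auth:
      oauthToken:
        name: bitbucket-secret
        key: token
    webhook:
      endpoint: /push
      port: "12000"
      url: https://my-cluster.example.com
    deleteHookOnFinish: true
```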

+BitbucketRepository +

-Webhook event sources +(Appears on: +BitbucketEventSource)

-
+ + + + + + - - - +
+Field + +Description +
-amqp
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AMQPEventSource - +owner
string

-AMQP event sources +Owner is the owner of the repository

-kafka
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.KafkaEventSource - +repositorySlug
string

-Kafka event sources +RepositorySlug is a URL-friendly version of a repository name, +automatically generated by Bitbucket for use in the URL

-mqtt
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.MQTTEventSource - -
+
+

+BitbucketServerEventSource +

-MQTT event sources +(Appears on: +EventSourceSpec)

- +

+

+BitbucketServerEventSource refers to event-source related to Bitbucket +Server events +

+

+ + + + + + + - - - +
+Field + +Description +
-nats
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.NATSEventsSource - +webhook
+ WebhookContext +

-NATS event sources +Webhook holds configuration to run a http server.

-sns
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SNSEventSource - +projectKey
string
+(Optional)

-SNS event sources +DeprecatedProjectKey is the key of project for which integration needs +to set up. Deprecated: use Repositories instead. Will be unsupported in +v1.8.

-sqs
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SQSEventSource - +repositorySlug
string
+(Optional)

-SQS event sources +DeprecatedRepositorySlug is the slug of the repository for which +integration needs to set up. Deprecated: use Repositories instead. Will +be unsupported in v1.8.

-pubSub
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.PubSubEventSource - +projects
\[\]string
+(Optional)

-PubSub event sources +Projects holds a list of projects for which integration needs to set up, +this will add the webhook to all repositories in the project.

-github
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GithubEventSource - +repositories
+ +\[\]BitbucketServerRepository
+(Optional)

-Github event sources +Repositories holds a list of repositories for which integration needs to +set up.

-gitlab
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GitlabEventSource - +events
\[\]string
+(Optional)

-Gitlab event sources +Events are the bitbucket events to listen to. Refer +https://confluence.atlassian.com/bitbucketserver/event-payload-938025882.html

-hdfs
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.HDFSEventSource - +skipBranchRefsChangedOnOpenPR
bool
+(Optional)

-HDFS event sources +SkipBranchRefsChangedOnOpenPR bypasses the event repo:refs_changed for +branches whenever there’s an associated open pull request. This helps in +optimizing the event handling process by avoiding unnecessary triggers +for branch reference changes that are already part of a pull request +under review.

-slack
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SlackEventSource - +accessToken
+ +Kubernetes core/v1.SecretKeySelector

-Slack event sources +AccessToken is reference to K8s secret which holds the bitbucket api +access information.

-storageGrid
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.StorageGridEventSource - +webhookSecret
+ +Kubernetes core/v1.SecretKeySelector

-StorageGrid event sources +WebhookSecret is reference to K8s secret which holds the bitbucket +webhook secret (for HMAC validation).

-azureEventsHub
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AzureEventsHubEventSource - +bitbucketserverBaseURL
string

-AzureEventsHub event sources +BitbucketServerBaseURL is the base URL for API requests to a custom +endpoint.

-stripe
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.StripeEventSource - +deleteHookOnFinish
bool
+(Optional)

-Stripe event sources +DeleteHookOnFinish determines whether to delete the Bitbucket Server +hook for the project once the event source is stopped.

-emitter
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EmitterEventSource - +metadata
map\[string\]string
+(Optional)

-Emitter event source +Metadata holds the user defined metadata which will passed along the +event payload.

-redis
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.RedisEventSource +filter
+
EventSourceFilter
+(Optional)

-Redis event source +Filter

-nsq
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.NSQEventSource - +tls
+github.com/argoproj/argo-events/pkg/apis/common.TLSConfig
+(Optional)

-NSQ event source +TLS configuration for the bitbucketserver client.

-pulsar
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.PulsarEventSource - +checkInterval
string
+(Optional)

-Pulsar event source +CheckInterval is a duration in which to wait before checking that the +webhooks exist, e.g. 1s, 30m, 2h… (defaults to 1m)

-generic
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GenericEventSource - -
+
+
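A sketch of a `bitbucketserver` entry (base URL, project key, repository slug, and secret names are hypothetical):

```yaml
bitbucketserver:
  example:
    repositories:
      - projectKey: PROJ
        repositorySlug: my-repo
    events:
      - repo:refs_changed
    bitbucketserverBaseURL: https://bitbucket.example.com/rest
    accessToken:
      name: bitbucketserver-secret
      key: token
    webhookSecret:
      name: bitbucketserver-secret
      key: secret
    webhook:
      endpoint: /push
      port: "12000"
      url: https://my-cluster.example.com
    checkInterval: 1m
```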

+BitbucketServerRepository +

-Generic event source +(Appears on: +BitbucketServerEventSource)

- +

+

+ + + + + + + -
+Field + +Description +
-replicas
int32 +projectKey
string

-Replicas is the event source deployment replicas +ProjectKey is the key of project for which integration needs to set up.

- - -status
- EventSourceStatus - +repositorySlug
string -(Optional) +

+RepositorySlug is the slug of the repository for which integration needs +to set up. +

-

-EventSourceSpec +

+CalendarEventSource

(Appears on: -EventSource) +EventSourceSpec)

-EventSourceSpec refers to specification of event-source resource +CalendarEventSource describes a time based dependency. One of the fields +(schedule, interval, or recurrence) must be passed. Schedule takes +precedence over interval; interval takes precedence over recurrence

@@ -1395,383 +1488,1639 @@ Description + + + + + +
-eventBusName
string +schedule
string
+(Optional)

-EventBusName references to a EventBus name. By default the value is -“default” +Schedule is a cron-like expression. For reference, see: +https://en.wikipedia.org/wiki/Cron

-template
- Template +interval
string
(Optional)

-Template is the pod specification for the event source +Interval is a string that describes an interval duration, e.g. 1s, 30m, +2h…

-service
-Service +exclusionDates
\[\]string +
+

+ExclusionDates defines the list of DATE-TIME exceptions for recurring +events. +

+
+timezone
string
(Optional)

-Service is the specifications of the service to expose the event source +Timezone in which to run the schedule

-replica
int32 +metadata
map\[string\]string
+(Optional)

-DeprecatedReplica is the event source deployment replicas Deprecated: -use replicas instead, will be removed in v1.5 +Metadata holds the user defined metadata which will passed along the +event payload.

-minio
-map\[string\]github.com/argoproj/argo-events/pkg/apis/common.S3Artifact +persistence
+ EventPersistence

-Minio event sources +Persistence hold the configuration for event persistence

-calendar
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.CalendarEventSource +filter
+
EventSourceFilter
+(Optional)

-Calendar event sources +Filter

+
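A sketch of a `calendar` entry; exactly one of `schedule` or `interval` is typically set, and `persistence` enables catch-up of missed triggers (the configmap name and durations are illustrative):

```yaml
calendar:
  example:
    interval: 10s
    persistence:
      catchup:
        enabled: true
        maxDuration: 1h
      configMap:
        name: calendar-catchup
        createIfNotExist: true
```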

+CatchupConfiguration +

+

+(Appears on: +EventPersistence) +

+

+

+ + + + + + + + + + + +
+Field + +Description +
-file
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.FileEventSource - +enabled
bool

-File event sources +Enabled enables to triggered the missed schedule when eventsource +restarts

-resource
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.ResourceEventSource +maxDuration
string +
+

+MaxDuration holds max catchup duration +

+
+

+ConfigMapPersistence +

+

+(Appears on: +EventPersistence) +

+

+

+ + + + + + + + + + + + + + + + + +
+Field + +Description +
+name
string +
+

+Name of the configmap +

+
+createIfNotExist
bool +
+

+CreateIfNotExist will create configmap if it doesn’t exists +

+
+

+EmitterEventSource +

+

+(Appears on: +EventSourceSpec) +

+

+

+EmitterEventSource describes the event source for emitter More info at +https://emitter.io/develop/getting-started/ +

+

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Field + +Description +
+broker
string +
+

+Broker URI to connect to. +

+
+channelKey
string +
+

+ChannelKey refers to the channel key +

+
+channelName
string +
+

+ChannelName refers to the channel name +

+
+username
+ +Kubernetes core/v1.SecretKeySelector +
+(Optional) +

+Username to use to connect to broker +

+
+password
+ +Kubernetes core/v1.SecretKeySelector +
+(Optional) +

+Password to use to connect to broker +

+
+connectionBackoff
+github.com/argoproj/argo-events/pkg/apis/common.Backoff +
+(Optional) +

+Backoff holds parameters applied to connection. +

+
+jsonBody
bool +
+(Optional) +

+JSONBody specifies that all event body payload coming from this source +will be JSON +

+
+tls
+github.com/argoproj/argo-events/pkg/apis/common.TLSConfig +
+(Optional) +

+TLS configuration for the emitter client. +

+
+metadata
map\[string\]string +
+(Optional) +

+Metadata holds the user defined metadata which will passed along the +event payload. +

+
+filter
+ EventSourceFilter
+(Optional)

-Resource event sources +Filter +

+
+

+EventPersistence +

+

+(Appears on: +CalendarEventSource) +

+

+

+ + + + + + + + + + + + + + + + + +
+Field + +Description +
+catchup
+ +CatchupConfiguration +
+

+Catchup enables to triggered the missed schedule when eventsource +restarts +

+
+configMap
+ +ConfigMapPersistence +
+

+ConfigMap holds configmap details for persistence +

+
+

+EventSource +

+

+

+EventSource is the definition of a eventsource resource +

+

+ + + + + + + + + + + + + + + + + + + + + +
+Field + +Description +
+metadata
+ +Kubernetes meta/v1.ObjectMeta +
+Refer to the Kubernetes API documentation for the fields of the +metadata field. +
+spec
+ EventSourceSpec + +
+

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+eventBusName
string +
+

+EventBusName references an EventBus name. By default the value is +“default” +

+
+template
+ Template +
+(Optional) +

+Template is the pod specification for the event source +

+
+service
+Service +
+(Optional) +

+Service is the specification of the service to expose the event source +

+
+minio
+map\[string\]github.com/argoproj/argo-events/pkg/apis/common.S3Artifact + +
+

+Minio event sources +

+
+calendar
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.CalendarEventSource + +
+

+Calendar event sources +

+
+file
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.FileEventSource + +
+

+File event sources +

+
+resource
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.ResourceEventSource + +
+

+Resource event sources +

+
+webhook
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WebhookEventSource + +
+

+Webhook event sources +

+
+amqp
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AMQPEventSource + +
+

+AMQP event sources +

+
+kafka
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.KafkaEventSource + +
+

+Kafka event sources +

+
+mqtt
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.MQTTEventSource + +
+

+MQTT event sources +

+
+nats
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.NATSEventsSource + +
+

+NATS event sources +

+
+sns
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SNSEventSource + +
+

+SNS event sources +

+
+sqs
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SQSEventSource + +
+

+SQS event sources +

+
+pubSub
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.PubSubEventSource + +
+

+PubSub event sources +

+
+github
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GithubEventSource + +
+

+Github event sources +

+
+gitlab
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GitlabEventSource + +
+

+Gitlab event sources +

+
+hdfs
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.HDFSEventSource + +
+

+HDFS event sources +

+
+slack
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SlackEventSource + +
+

+Slack event sources +

+
+storageGrid
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.StorageGridEventSource + +
+

+StorageGrid event sources +

+
+azureEventsHub
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AzureEventsHubEventSource + +
+

+AzureEventsHub event sources +

+
+stripe
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.StripeEventSource + +
+

+Stripe event sources +

+
+emitter
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EmitterEventSource + +
+

+Emitter event source +

+
+redis
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.RedisEventSource + +
+

+Redis event source +

+
+nsq
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.NSQEventSource + +
+

+NSQ event source +

+
+pulsar
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.PulsarEventSource + +
+

+Pulsar event source +

+
+generic
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GenericEventSource + +
+

+Generic event source +

+
+replicas
int32 +
+

+Replicas is the event source deployment replicas +

+
+bitbucketserver
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.BitbucketServerEventSource + +
+

+Bitbucket Server event sources +

+
+bitbucket
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.BitbucketEventSource + +
+

+Bitbucket event sources +

+
+redisStream
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.RedisStreamEventSource + +
+

+Redis stream source +

+
+azureServiceBus
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AzureServiceBusEventSource + +
+

+Azure Service Bus event source +

+
+azureQueueStorage
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AzureQueueStorageEventSource + +
+

+AzureQueueStorage event source +

+
+sftp
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SFTPEventSource + +
+

+SFTP event sources +

+
+gerrit
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GerritEventSource + +
+

+Gerrit event source +

+
+
+status
+ EventSourceStatus + +
+(Optional) +
+

+EventSourceFilter +

+

+(Appears on: +AMQPEventSource, +AzureEventsHubEventSource, +AzureQueueStorageEventSource, +AzureServiceBusEventSource, +BitbucketEventSource, +BitbucketServerEventSource, +CalendarEventSource, +EmitterEventSource, +FileEventSource, +GenericEventSource, +GerritEventSource, +GithubEventSource, +GitlabEventSource, +HDFSEventSource, +KafkaEventSource, +MQTTEventSource, +NATSEventsSource, +NSQEventSource, +PubSubEventSource, +PulsarEventSource, +RedisEventSource, +RedisStreamEventSource, +SFTPEventSource, +SNSEventSource, +SQSEventSource, +SlackEventSource, +WebhookEventSource) +

+

+

+ + + + + + + + + + + + + +
+Field + +Description +
+expression
string +
+
+

+EventSourceSpec +

+

+(Appears on: +EventSource) +

+

+

+EventSourceSpec refers to the specification of the event-source resource +

+

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Field + +Description +
+eventBusName
string +
+

+EventBusName references an EventBus name. By default the value is +“default” +

+
+template
+ Template +
+(Optional) +

+Template is the pod specification for the event source +

+
+service
+Service +
+(Optional) +

+Service is the specification of the service to expose the event source +

+
+minio
+map\[string\]github.com/argoproj/argo-events/pkg/apis/common.S3Artifact + +
+

+Minio event sources +

+
+calendar
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.CalendarEventSource + +
+

+Calendar event sources +

+
+file
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.FileEventSource + +
+

+File event sources +

+
+resource
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.ResourceEventSource + +
+

+Resource event sources +

+
+webhook
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WebhookEventSource + +
+

+Webhook event sources +

+
+amqp
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AMQPEventSource + +
+

+AMQP event sources +

+
+kafka
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.KafkaEventSource + +
+

+Kafka event sources +

+
+mqtt
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.MQTTEventSource + +
+

+MQTT event sources +

+
+nats
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.NATSEventsSource + +
+

+NATS event sources +

+
+sns
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SNSEventSource + +
+

+SNS event sources +

+
+sqs
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SQSEventSource + +
+

+SQS event sources +

+
+pubSub
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.PubSubEventSource + +
+

+PubSub event sources +

+
+github
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GithubEventSource + +
+

+Github event sources +

+
+gitlab
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GitlabEventSource + +
+

+Gitlab event sources +

+
+hdfs
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.HDFSEventSource + +
+

+HDFS event sources +

+
+slack
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SlackEventSource + +
+

+Slack event sources +

+
+storageGrid
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.StorageGridEventSource + +
+

+StorageGrid event sources +

+
+azureEventsHub
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AzureEventsHubEventSource + +
+

+AzureEventsHub event sources +

+
+stripe
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.StripeEventSource + +
+

+Stripe event sources +

+
+emitter
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EmitterEventSource + +
+

+Emitter event source +

+
+redis
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.RedisEventSource + +
+

+Redis event source +

+
+nsq
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.NSQEventSource + +
+

+NSQ event source +

+
+pulsar
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.PulsarEventSource + +
+

+Pulsar event source +

+
+generic
+ +map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GenericEventSource + +
+

+Generic event source

-webhook
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WebhookContext - +replicas
int32

-Webhook event sources +Replicas is the number of event source deployment replicas

-amqp
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AMQPEventSource +bitbucketserver
+
+map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.BitbucketServerEventSource

-AMQP event sources +Bitbucket Server event sources

-kafka
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.KafkaEventSource +bitbucket
+
+map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.BitbucketEventSource

-Kafka event sources +Bitbucket event sources

-mqtt
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.MQTTEventSource +redisStream
+
+map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.RedisStreamEventSource

-MQTT event sources +Redis stream source

-nats
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.NATSEventsSource +azureServiceBus
+
+map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AzureServiceBusEventSource

-NATS event sources +Azure Service Bus event source

-sns
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SNSEventSource +azureQueueStorage
+
+map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AzureQueueStorageEventSource

-SNS event sources +AzureQueueStorage event source

-sqs
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SQSEventSource +sftp
+
+map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SFTPEventSource

-SQS event sources +SFTP event sources

-pubSub
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.PubSubEventSource +gerrit
+
+map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GerritEventSource

-PubSub event sources +Gerrit event source

+
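Tying the fields above together, a minimal EventSource manifest sketch built from this spec (the metadata name and the calendar entry are hypothetical; each key under an event-source map becomes an event name):

    apiVersion: argoproj.io/v1alpha1
    kind: EventSource
    metadata:
      name: calendar-example     # hypothetical
    spec:
      eventBusName: default      # optional; "default" is assumed when omitted
      replicas: 1
      calendar:
        every-10s:               # hypothetical event name
          interval: 10s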

+EventSourceStatus +

+

+(Appears on: +EventSource) +

+

+

+EventSourceStatus holds the status of the event-source resource +

+

+ + + + + + + + + +
+Field + +Description +
-github
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GithubEventSource - +Status
+github.com/argoproj/argo-events/pkg/apis/common.Status

-Github event sources +(Members of Status are embedded into this type.)

+

+FileEventSource +

+

+(Appears on: +EventSourceSpec) +

+

+

+FileEventSource describes an event-source for file related events. +

+

+ + + + + + + + + +
+Field + +Description +
-gitlab
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GitlabEventSource - +eventType
string

-Gitlab event sources +Type of file operations to watch. Refer to +https://github.com/fsnotify/fsnotify/blob/master/fsnotify.go +for more information.

-hdfs
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.HDFSEventSource - +watchPathConfig
+ WatchPathConfig +

-HDFS event sources +WatchPathConfig contains configuration about the file path to watch

-slack
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SlackEventSource - +polling
bool

-Slack event sources +Use polling instead of inotify

-storageGrid
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.StorageGridEventSource - +metadata
map\[string\]string
+(Optional)

-StorageGrid event sources +Metadata holds the user defined metadata which will be passed along with the +event payload.

-azureEventsHub
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AzureEventsHubEventSource +filter
+
EventSourceFilter
+(Optional)

-AzureEventsHub event sources +Filter

+
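A sketch of a file event-source entry under these fields (the directory and file name are purely illustrative; eventType values follow fsnotify, e.g. CREATE, WRITE, REMOVE):

    file:
      example:
        eventType: CREATE
        polling: false                 # use inotify rather than polling
        watchPathConfig:
          directory: /data/incoming/   # hypothetical directory
          path: input.json             # hypothetical file name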

+GenericEventSource +

+

+(Appears on: +EventSourceSpec) +

+

+

+GenericEventSource refers to a generic event source. It can be used to +implement a custom event source. +

+

+ + + + + + + +
+Field + +Description +
-stripe
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.StripeEventSource - +url
string

-Stripe event sources +URL of the gRPC server that implements the event source.

-emitter
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EmitterEventSource - +config
string

-Emitter event source +Config is the event source configuration

-redis
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.RedisEventSource - +insecure
bool

-Redis event source +Insecure determines the type of connection.

-nsq
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.NSQEventSource - +jsonBody
bool
+(Optional)

-NSQ event source +JSONBody specifies that all event body payload coming from this source +will be JSON

-pulsar
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.PulsarEventSource - +metadata
map\[string\]string
+(Optional)

-Pulsar event source +Metadata holds the user defined metadata which will be passed along with the +event payload.

-generic
- -map\[string\]github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GenericEventSource - +authSecret
+ +Kubernetes core/v1.SecretKeySelector
+(Optional)

-Generic event source +AuthSecret holds a secret selector that contains a bearer token for +authentication

-replicas
int32 +filter
+ EventSourceFilter +
+(Optional)

-Replicas is the event source deployment replicas +Filter

-

-EventSourceStatus +
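A sketch of a generic event-source entry wired to a custom gRPC server (the service address, secret name, and config payload are hypothetical):

    generic:
      example:
        url: my-source.argo-events.svc:8080   # hypothetical gRPC server address
        insecure: true
        jsonBody: true
        config: |-
          key: value                          # opaque config handed to the server
        authSecret:
          name: generic-auth                  # hypothetical secret
          key: token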

+GerritEventSource

(Appears on: -EventSource) +EventSourceSpec)

-EventSourceStatus holds the status of the event-source resource +GerritEventSource refers to event-source related to gerrit events

@@ -1788,72 +3137,68 @@ Description - -
-Status
-github.com/argoproj/argo-events/pkg/apis/common.Status +webhook
+ WebhookContext +

-(Members of Status are embedded into this type.) +Webhook holds configuration to run an HTTP server

-

-FileEventSource -

+ + +hookName
string + +

-(Appears on: -EventSourceSpec) +HookName is the name of the webhook

+ + + + +events
\[\]string + +

-

-FileEventSource describes an event-source for file related events. +Events are gerrit events to listen to. Refer to +https://gerrit-review.googlesource.com/Documentation/cmd-stream-events.html#events

-

- - - - - + - - @@ -1869,98 +3214,93 @@ event payload.

- -
-Field - -Description -
-eventType
string +auth
+github.com/argoproj/argo-events/pkg/apis/common.BasicAuth
+(Optional)

-Type of file operations to watch Refer -https://github.com/fsnotify/fsnotify/blob/master/fsnotify.go -for more information +Auth holds secret selectors for username and password

-watchPathConfig
- WatchPathConfig - +gerritBaseURL
string

-WatchPathConfig contains configuration about the file path to watch +GerritBaseURL is the base URL for API requests to a custom endpoint

-polling
bool +deleteHookOnFinish
bool
+(Optional)

-Use polling instead of inotify +DeleteHookOnFinish determines whether to delete the Gerrit hook for the +project once the event source is stopped.

-

-GenericEventSource -

-

-(Appears on: -EventSourceSpec) -

-

-

-GenericEventSource refers to a generic event source. It can be used to -implement a custom event source. -

-

- - - - - - - - + +
-Field - -Description -
-url
string +projects
\[\]string

-URL of the gRPC server that implements the event source. +List of project namespace paths like “whynowy/test”.

-config
string +sslVerify
bool
+(Optional)

-Config is the event source configuration +SslVerify enables ssl verification

-insecure
bool +filter
+ EventSourceFilter +
+(Optional)

-Insecure determines the type of connection. +Filter

+
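A sketch of a gerrit event-source entry using the fields above (the host, secret names, and hook name are hypothetical; event names follow the gerrit stream-events documentation linked above):

    gerrit:
      example:
        hookName: gerrit-webhook                    # hypothetical
        gerritBaseURL: https://gerrit.example.com   # hypothetical endpoint
        events:
          - patchset-created
        projects:
          - "whynowy/test"
        sslVerify: false
        deleteHookOnFinish: true
        auth:
          username:
            name: gerrit-auth                       # hypothetical secret
            key: username
          password:
            name: gerrit-auth
            key: password
        webhook:
          endpoint: /gerrit
          port: "12000"
          method: POST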

+GithubAppCreds +

+

+(Appears on: +GithubEventSource) +

+

+

+ + + + + + + + @@ -1995,6 +3335,7 @@ Description id
int64 @@ -2156,7 +3502,59 @@ event payload. + + + + + + + + + + + + + + + + @@ -2203,8 +3601,10 @@ Webhook holds configuration to run a http server projectID
string @@ -2215,19 +3615,19 @@ ProjectID is the id of project for which integration needs to setup @@ -2277,6 +3677,56 @@ event payload.

+ + + + + + + + + + + + + + + +
+Field + +Description +
-jsonBody
bool +privateKey
+ +Kubernetes core/v1.SecretKeySelector
-(Optional)

-JSONBody specifies that all event body payload coming from this source -will be JSON +PrivateKey refers to a K8s secret containing the GitHub app private key

-metadata
map\[string\]string +appID
int64
-(Optional)

-Metadata holds the user defined metadata which will passed along the -event payload. +AppID refers to the GitHub App ID for the application you created

-authSecret
- -Kubernetes core/v1.SecretKeySelector +installationID
int64
-(Optional)

-AuthSecret holds a secret selector that contains a bearer token for -authentication +InstallationID refers to the Installation ID of the GitHub app you +created and installed
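A sketch of how these credentials plug into a github event-source entry (the secret name, key, and numeric IDs are placeholders):

    github:
      example:
        githubApp:
          privateKey:
            name: github-app-pem    # hypothetical secret holding the app's PEM key
            key: privateKey.pem
          appID: 123456             # placeholder App ID
          installationID: 7890123   # placeholder Installation ID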

+(Optional)

Id is the webhook’s id Deprecated: This is not used at all, will be removed in v1.6 @@ -2018,6 +3359,7 @@ Webhook refers to the configuration required to run a http server owner
string

+(Optional)

DeprecatedOwner refers to GitHub owner name i.e. argoproj Deprecated: use Repositories instead. Will be unsupported in v 1.6 @@ -2029,6 +3371,7 @@ use Repositories instead. Will be unsupported in v 1.6 repository
string

+(Optional)

DeprecatedRepository refers to GitHub repo name i.e. argo-events Deprecated: use Repositories instead. Will be unsupported in v 1.6 @@ -2040,12 +3383,15 @@ Deprecated: use Repositories instead. Will be unsupported in v 1.6 events
\[\]string

+

+Events refer to Github events to which the event source will subscribe +

apiToken
- + Kubernetes core/v1.SecretKeySelector
@@ -2058,7 +3404,7 @@ APIToken refers to a K8s secret containing github api token
webhookSecret
- + Kubernetes core/v1.SecretKeySelector
@@ -2087,7 +3433,7 @@ Insecure tls verification (Optional)

Active refers to status of the webhook for event deliveries. -https://developer.github.com/webhooks/creating/\#active +https://developer.github.com/webhooks/creating/#active

Repositories holds the information of repositories, which uses repo -owner as the key, and list of repo names as the value +owner as the key, and list of repo names as the value. Not required if +Organizations is set. +

+
+organizations
\[\]string +
+

+Organizations holds the names of organizations (used for organization +level webhooks). Not required if Repositories is set. +

+
+githubApp
+ GithubAppCreds + +
+(Optional) +

+GitHubApp holds the GitHub app credentials +

+
+filter
+ EventSourceFilter + +
+(Optional) +

+Filter +

+
+payloadEnrichment
+ +PayloadEnrichmentFlags +
+(Optional) +

+PayloadEnrichment holds flags that determine whether to enrich GitHub’s +original payload with additional information.
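Putting the newer fields together, a sketch of a github event-source entry with an organization-level webhook and payload enrichment (the organization, secret names, and endpoint are hypothetical):

    github:
      org-wide:
        organizations:
          - my-org               # hypothetical; Repositories is not needed when set
        events:
          - push
          - pull_request
        apiToken:
          name: github-access    # hypothetical secret
          key: token
        webhookSecret:
          name: github-access
          key: secret
        payloadEnrichment:
          fetchPROnPRCommentAdded: true
        webhook:
          endpoint: /github
          port: "12000"
          method: POST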

+(Optional)

-ProjectID is the id of project for which integration needs to setup +DeprecatedProjectID is the id of the project for which the integration needs to +be set up. Deprecated: use Projects instead. Will be unsupported in v 1.7

Events are gitlab event to listen to. Refer -https://github.com/xanzy/go-gitlab/blob/bf34eca5d13a9f4c3f501d8a97b8ac226d55e4d9/projects.go\#L794. +https://github.com/xanzy/go-gitlab/blob/bf34eca5d13a9f4c3f501d8a97b8ac226d55e4d9/projects.go#L794.

accessToken
- + Kubernetes core/v1.SecretKeySelector

-AccessToken is reference to k8 secret which holds the gitlab api access +AccessToken references a K8s secret which holds the gitlab api access information

+projects
\[\]string +
+(Optional) +

+List of project IDs or project namespace paths like “whynowy/test”. +Projects and groups cannot be empty at the same time. +

+
+secretToken
+ +Kubernetes core/v1.SecretKeySelector +
+

+SecretToken references a K8s secret which holds the Secret Token used by +the webhook config +

+
+filter
+ EventSourceFilter + +
+(Optional) +

+Filter +

+
+groups
\[\]string +
+(Optional) +

+List of group IDs or group names like “test”. Group-level hooks are available +in GitLab Premium and Ultimate. +

+
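A sketch of a gitlab event-source entry covering the projects/groups split (the IDs, secret names, and endpoint are hypothetical; event names follow the go-gitlab list linked above):

    gitlab:
      example:
        projects:
          - "1001"               # project IDs or namespace paths like "whynowy/test"
        groups:
          - "2002"               # optional; group-level hooks need GitLab Premium/Ultimate
        events:
          - PushEvents
        accessToken:
          name: gitlab-access    # hypothetical secret
          key: token
        secretToken:
          name: gitlab-access
          key: secret-token
        webhook:
          endpoint: /gitlab
          port: "12000"
          method: POST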

@@ -2357,7 +3807,7 @@ ccache or keytab is used. krbCCacheSecret
- + Kubernetes core/v1.SecretKeySelector @@ -2370,7 +3820,7 @@ or keytab can be set to use Kerberos. krbKeytabSecret
- + Kubernetes core/v1.SecretKeySelector @@ -2405,7 +3855,7 @@ if keytab is used. krbConfigConfigMap
- + Kubernetes core/v1.ConfigMapKeySelector @@ -2438,6 +3888,19 @@ event payload.

+ + +filter
+ EventSourceFilter + + + +(Optional) +

+Filter +

+ +

@@ -2536,6 +3999,7 @@ URL to kafka cluster, multiple URLs separated by comma partition
string +(Optional)

Partition name

@@ -2646,6 +4110,36 @@ SASL configuration for the kafka client

+ + +filter
+ EventSourceFilter + + + +(Optional) +

+Filter +

+ + + + +config
string + + +(Optional) +

+Yaml format Sarama config for Kafka connection. It follows the struct of +sarama.Config. See +https://github.com/IBM/sarama/blob/main/config.go +e.g. +

+

+consumer:
+  fetch:
+    min: 1
+net:
+  MaxOpenRequests: 5
+

+ +

@@ -2749,6 +4243,31 @@ event payload.

+ + +filter
+ EventSourceFilter + + + +(Optional) +

+Filter +

+ + + + +auth
+github.com/argoproj/argo-events/pkg/apis/common.BasicAuth + + +(Optional) +

+Auth holds secret selectors for username and password +

+ +

@@ -2790,7 +4309,7 @@ Baisc auth with username and password token
- + Kubernetes core/v1.SecretKeySelector @@ -2803,7 +4322,7 @@ Token used to connect nkey
- + Kubernetes core/v1.SecretKeySelector @@ -2816,7 +4335,7 @@ NKey used to connect credential
- + Kubernetes core/v1.SecretKeySelector @@ -2903,31 +4422,57 @@ github.com/argoproj/argo-events/pkg/apis/common.TLSConfig (Optional)

-TLS configuration for the nats client. +TLS configuration for the nats client. +

+ + + + +metadata
map\[string\]string + + +(Optional) +

+Metadata holds the user defined metadata which will be passed along with the +event payload. +

+ + + + +auth
+NATSAuth + + +(Optional) +

+Auth information

-metadata
map\[string\]string +filter
+ EventSourceFilter + (Optional)

-Metadata holds the user defined metadata which will passed along the -event payload. +Filter

-auth
-NATSAuth +queue
string (Optional)

-Auth information +Queue is the name of the queue group to subscribe to, if specified. Uses +QueueSubscribe logic to subscribe as a queue group. If the queue is empty, +uses the default Subscribe logic. +

@@ -3036,6 +4581,19 @@ event payload.

+ + +filter
+ EventSourceFilter + + + +(Optional) +

+Filter +

+ +

@@ -3065,7 +4623,7 @@ Description

-Orgnization or user name +Organization or user name

@@ -3081,6 +4639,42 @@ Repository names +

+PayloadEnrichmentFlags +

+

+(Appears on: +GithubEventSource) +

+

+

+ + + + + + + + + + + + + +
+Field + +Description +
+fetchPROnPRCommentAdded
bool +
+(Optional) +

+FetchPROnPRCommentAdded determines whether to enrich the payload +provided by GitHub on “pull request comment added” events, with the full +pull request info +

+

PubSubEventSource

@@ -3161,7 +4755,7 @@ time you update the setting, which has a possibility of event loss. credentialSecret
- + Kubernetes core/v1.SecretKeySelector @@ -3200,25 +4794,26 @@ will be JSON -credentialsFile
string +metadata
map\[string\]string +(Optional)

-CredentialsFile is the file that contains credentials to authenticate -for GCP Deprecated: will be removed in v1.5, use CredentialSecret -instead +Metadata holds the user defined metadata which will be passed along with the +event payload.

-metadata
map\[string\]string +filter
+ EventSourceFilter + (Optional)

-Metadata holds the user defined metadata which will passed along the -event payload. +Filter

@@ -3283,7 +4878,7 @@ Configure the service URL for the Pulsar service. tlsTrustCertsSecret
- + Kubernetes core/v1.SecretKeySelector @@ -3364,6 +4959,60 @@ event payload.

+ + +authTokenSecret
+ +Kubernetes core/v1.SecretKeySelector + + +(Optional) +

+Authentication token for the pulsar client. Either token or athenz can +be set to use auth. +

+ + + + +filter
+ EventSourceFilter + + + +(Optional) +

+Filter +

+ + + + +authAthenzParams
map\[string\]string + + +(Optional) +

+Authentication athenz parameters for the pulsar client. Refer +https://github.com/apache/pulsar-client-go/blob/master/pulsar/auth/athenz.go +Either token or athenz can be set to use auth. +

+ + + + +authAthenzSecret
+ +Kubernetes core/v1.SecretKeySelector + + +(Optional) +

+Authentication athenz privateKey secret for the pulsar client. +AuthAthenzSecret must be set if AuthAthenzParams is used. +

+ +
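A sketch of the token variant of pulsar authentication using the fields above (the broker URL, topic, and secret name are hypothetical; per the descriptions, token and athenz auth are alternatives):

    pulsar:
      example:
        url: pulsar://pulsar.example.com:6650   # hypothetical broker URL
        topics:
          - my-topic                            # hypothetical topic
        authTokenSecret:
          name: pulsar-auth                     # hypothetical secret
          key: token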

@@ -3377,7 +5026,7 @@ RedisEventSource

RedisEventSource describes an event source for the Redis PubSub. More info at -https://godoc.org/github.com/go-redis/redis\#example-PubSub +https://godoc.org/github.com/go-redis/redis#example-PubSub

@@ -3405,7 +5054,7 @@ HostAddress refers to the address of the Redis host/server + + + + + + + + + + + + + + + + + + + + + + + + + + + +
password
- + Kubernetes core/v1.SecretKeySelector
@@ -3429,43 +5078,229 @@ specified if password is declared
-db
int32 +db
int32 +
+(Optional) +

+DB to use. If not specified, default DB 0 will be used. +

+
+channels
\[\]string +
+
+tls
+github.com/argoproj/argo-events/pkg/apis/common.TLSConfig +
+(Optional) +

+TLS configuration for the redis client. +

+
+metadata
map\[string\]string +
+(Optional) +

+Metadata holds the user defined metadata which will be passed along with the +event payload. +

+
+filter
+ EventSourceFilter + +
+(Optional) +

+Filter +

+
+jsonBody
bool +
+(Optional) +

+JSONBody specifies that all event body payload coming from this source +will be JSON +

+
+username
string +
+(Optional) +

+Username required for ACL style authentication if any. +

+
+
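A sketch of a redis event-source entry with ACL-style authentication (the address, channel, and secret name are hypothetical):

    redis:
      example:
        hostAddress: redis.argo-events.svc:6379   # hypothetical address
        db: 0                                     # optional; DB 0 is the default
        channels:
          - my-channel
        username: app-user                        # only for ACL-style auth
        password:
          name: redis-auth                        # hypothetical secret
          key: password
        jsonBody: true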

+RedisStreamEventSource +

+

+(Appears on: +EventSourceSpec) +

+

+

+RedisStreamEventSource describes an event source for Redis streams +(https://redis.io/topics/streams-intro) +

+

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -3524,7 +5359,7 @@ filter. + + + +
+Field + +Description +
+hostAddress
string +
+

+HostAddress refers to the address of the Redis host/server (master +instance) +

+
+password
+ +Kubernetes core/v1.SecretKeySelector +
+(Optional) +

+Password required for authentication if any. +

+
+db
int32 +
+(Optional) +

+DB to use. If not specified, default DB 0 will be used. +

+
+streams
\[\]string +
+

+Streams to look for entries. XREADGROUP is used on all streams using a +single consumer group. +

+
+maxMsgCountPerRead
int32 +
+(Optional) +

+MaxMsgCountPerRead holds the maximum number of messages per stream that +will be read in each XREADGROUP call across all streams. Example: if there are 2 +streams and MaxMsgCountPerRead=10, then each XREADGROUP may read up to a +total of 20 messages. Same as the COUNT option in +XREADGROUP (https://redis.io/topics/streams-intro). +Defaults to 10 +

+
+consumerGroup
string +
+(Optional) +

+ConsumerGroup refers to the Redis stream consumer group that will be +created on all redis streams. Messages are read through this group. +Defaults to ‘argo-events-cg’ +

+
+tls
+github.com/argoproj/argo-events/pkg/apis/common.TLSConfig
(Optional)

-DB to use. If not specified, default DB 0 will be used. +TLS configuration for the redis client.

-channels
\[\]string +metadata
map\[string\]string
+(Optional) +

+Metadata holds the user defined metadata which will be passed along with the +event payload. +

-tls
-github.com/argoproj/argo-events/pkg/apis/common.TLSConfig +filter
+ EventSourceFilter +
(Optional)

-TLS configuration for the redis client. +Filter

-metadata
map\[string\]string +username
string
(Optional)

-Metadata holds the user defined metadata which will passed along the -event payload. +Username required for ACL style authentication if any.
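A sketch of a redis stream event-source entry using these fields (the address and stream names are hypothetical; the values shown match the documented defaults):

    redisStream:
      example:
        hostAddress: redis.argo-events.svc:6379   # hypothetical master address
        streams:
          - stream-a                    # one consumer group spans all listed streams
          - stream-b
        maxMsgCountPerRead: 10          # documented default; COUNT per XREADGROUP call
        consumerGroup: argo-events-cg   # documented default group name
        db: 0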

GroupVersionResource
- + Kubernetes meta/v1.GroupVersionResource
@@ -3562,6 +5397,16 @@ event payload.

+cluster
string +
+

+Cluster from which events will be listened to +

+

@@ -3586,7 +5431,7 @@ ResourceFilter

-ResourceFilter contains K8 ObjectMeta information to further filter +ResourceFilter contains K8s ObjectMeta information to further filter resource event objects

@@ -3623,7 +5468,10 @@ Prefix filter is applied on the resource name.

Labels provide listing options to K8s API to watch resource/s. Refer https://kubernetes.io/docs/concepts/overview/working-with-objects/label-selectors/ -for more info. +for more info. Unlike the K8s field selector, multiple values are passed as +comma-separated values instead of a list of values. E.g.: value: +value1,value2. Same as the K8s label selector, the operators “=”, “==”, “!=”, +“exists”, “!”, “notin”, “in”, “gt” and “lt” are supported

@@ -3646,7 +5494,7 @@ Same as K8s field selector, operator “=”, “==” and “!=” are supporte createdBy
- + Kubernetes meta/v1.Time @@ -3671,6 +5519,142 @@ treated as valid. +

+SFTPEventSource +

+

+(Appears on: +EventSourceSpec) +

+

+

+SFTPEventSource describes an event-source for sftp related events. +

+

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Field + +Description +
+eventType
string +
+

+Type of file operations to watch. Refer to +https://github.com/fsnotify/fsnotify/blob/master/fsnotify.go +for more information. +

+
+watchPathConfig
+ WatchPathConfig + +
+

+WatchPathConfig contains configuration about the file path to watch +

+
+username
+ +Kubernetes core/v1.SecretKeySelector +
+

+Username required for authentication if any. +

+
+password
+ +Kubernetes core/v1.SecretKeySelector +
+

+Password required for authentication if any. +

+
+sshKeySecret
+ +Kubernetes core/v1.SecretKeySelector +
+

+SSHKeySecret refers to the secret that contains the SSH key. The key needs to +contain both the private key and the public key. +

+
+address
+ +Kubernetes core/v1.SecretKeySelector +
+

+Address is the sftp address. +

+
+metadata
map\[string\]string +
+(Optional) +

+Metadata holds the user defined metadata which will be passed along with the +event payload. +

+
+filter
+ EventSourceFilter + +
+(Optional) +

+Filter +

+
+pollIntervalDuration
string +
+(Optional) +

+PollIntervalDuration is the interval at which to poll the SFTP server; +defaults to 10 seconds. +

+
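A sketch of an sftp event-source entry; note that address, username, and password are all secret selectors in this type (the secret and path names are hypothetical):

    sftp:
      example:
        eventType: CREATE
        address:
          name: sftp-conn             # hypothetical secret holding host:port
          key: address
        username:
          name: sftp-conn
          key: username
        sshKeySecret:
          name: sftp-ssh              # hypothetical secret with private+public key
          key: sshKey
        watchPathConfig:
          directory: /uploads/        # hypothetical remote directory
          path: data.csv
        pollIntervalDuration: 10s     # documented default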

SNSEventSource

@@ -3720,24 +5704,24 @@ TopicArn accessKey
- + Kubernetes core/v1.SecretKeySelector

-AccessKey refers K8 secret containing aws access key +AccessKey refers to a K8s secret containing the aws access key

secretKey
- + Kubernetes core/v1.SecretKeySelector

-SecretKey refers K8 secret containing aws secret key +SecretKey refers to a K8s secret containing the aws secret key

@@ -3786,6 +5770,31 @@ verification

+ + +filter
+ EventSourceFilter + + + +(Optional) +

+Filter +

+ + + + +endpoint
string + + +(Optional) +

+Endpoint configures connection to a specific SNS endpoint instead of +Amazon’s servers +

+ +

@@ -3815,24 +5824,24 @@ Description accessKey
- + Kubernetes core/v1.SecretKeySelector

-AccessKey refers K8 secret containing aws access key +AccessKey refers to a K8s secret containing the aws access key

secretKey
- + Kubernetes core/v1.SecretKeySelector

-SecretKey refers K8 secret containing aws secret key +SecretKey refers to a K8s secret containing the aws secret key

@@ -3914,6 +5923,59 @@ event payload.

+ + +dlq
bool + + +(Optional) +

+DLQ specifies if a dead-letter queue is configured for messages that +can’t be processed successfully. If set to true, messages with an invalid +payload won’t be acknowledged, so that they can be forwarded on to the +dead-letter queue. The default value is false. +

+ + + + +filter
+ EventSourceFilter + + + +(Optional) +

+Filter +

+ + + + +endpoint
string + + +(Optional) +

+Endpoint configures connection to a specific SQS endpoint instead of +Amazon’s servers +

+ + + + +sessionToken
+ +Kubernetes core/v1.SecretKeySelector + + +(Optional) +

+SessionToken refers to a K8s secret containing the AWS temporary +credentials (STS) session token +

+ +
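A sketch of an sqs event-source entry exercising the new fields (queue, region, secret names, and the custom endpoint are hypothetical; region and queue are standard fields of this type that are not shown in this hunk):

    sqs:
      example:
        region: us-east-1
        queue: my-queue                    # hypothetical queue name
        dlq: true                          # leave failed messages for the dead-letter queue
        accessKey:
          name: aws-secret                 # hypothetical secret
          key: accesskey
        secretKey:
          name: aws-secret
          key: secretkey
        sessionToken:                      # only with temporary (STS) credentials
          name: aws-secret
          key: sessiontoken
        endpoint: http://localstack:4566   # optional non-Amazon endpoint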

@@ -3957,9 +6019,8 @@ Key name (Optional)

-Supported operations like ==, !=, <=, >= etc. Defaults to ==. -Refer -https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/\#label-selectors +Supported operations like ==, != etc. Defaults to ==. Refer +https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors for more info.

@@ -4003,7 +6064,7 @@ Description ports
- + \[\]Kubernetes core/v1.ServicePort @@ -4026,7 +6087,7 @@ creation of the service will fail. This field can not be changed through updates. Valid values are “None”, empty string (“”), or a valid IP address. “None” can be specified for headless services when proxying is not required. More info: -https://kubernetes.io/docs/concepts/services-networking/service/\#virtual-ips-and-service-proxies +https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies

@@ -4059,7 +6120,7 @@ Description signingSecret
- + Kubernetes core/v1.SecretKeySelector @@ -4071,7 +6132,7 @@ Slack App signing secret token
- + Kubernetes core/v1.SecretKeySelector @@ -4104,6 +6165,19 @@ event payload.

+ + +filter
+ EventSourceFilter + + + +(Optional) +

+Filter +

+ +

@@ -4196,7 +6270,7 @@ S3 region. Defaults to us-east-1 authToken
- + Kubernetes core/v1.SecretKeySelector @@ -4322,7 +6396,7 @@ CreateWebhook if specified creates a new webhook programmatically. apiKey
- + Kubernetes core/v1.SecretKeySelector @@ -4411,7 +6485,7 @@ source pod. More info: container
- + Kubernetes core/v1.Container @@ -4424,7 +6498,7 @@ Container is the main container image to run in the event source pod volumes
- + \[\]Kubernetes core/v1.Volume @@ -4438,7 +6512,7 @@ eventsource. securityContext
- + Kubernetes core/v1.PodSecurityContext @@ -4453,7 +6527,7 @@ values of each field. affinity
- + Kubernetes core/v1.Affinity @@ -4466,7 +6540,7 @@ If specified, the pod’s scheduling constraints tolerations
- + \[\]Kubernetes core/v1.Toleration @@ -4493,7 +6567,7 @@ scheduled on that node. More info: imagePullSecrets
- + \[\]Kubernetes core/v1.LocalObjectReference @@ -4504,7 +6578,7 @@ same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: -https://kubernetes.io/docs/concepts/containers/images\#specifying-imagepullsecrets-on-a-pod +https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod

@@ -4549,7 +6623,8 @@ WatchPathConfig

(Appears on: FileEventSource, -HDFSEventSource) +HDFSEventSource, +SFTPEventSource)

@@ -4603,13 +6678,16 @@ WebhookContext

(Appears on: -EventSourceSpec, +BitbucketEventSource, +BitbucketServerEventSource, +GerritEventSource, GithubEventSource, GitlabEventSource, SNSEventSource, SlackEventSource, StorageGridEventSource, -StripeEventSource) +StripeEventSource, +WebhookEventSource)

@@ -4673,7 +6751,7 @@ URL is the url of the server. serverCertSecret
- + Kubernetes core/v1.SecretKeySelector @@ -4685,7 +6763,7 @@ ServerCertPath refers the file that contains the cert. serverKeySecret
- + Kubernetes core/v1.SecretKeySelector @@ -4709,7 +6787,7 @@ event payload. authSecret
- + Kubernetes core/v1.SecretKeySelector @@ -4722,21 +6800,65 @@ authentication -serverCertPath
string +maxPayloadSize
int64 + + +(Optional) +

+MaxPayloadSize is the maximum webhook payload size that the server will +accept. Requests exceeding that limit will be rejected with a “request too +large” response. Default value: 1048576 (1MB). +

+ + + + +

+WebhookEventSource +

+

+(Appears on: +EventSourceSpec) +

+

+

+WebhookEventSource describes an HTTP-based EventSource +

+
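A sketch of a webhook event-source entry combining the WebhookContext fields with the new payload limit and a filter (the endpoint, port, and expression are hypothetical; endpoint, method, and port are standard WebhookContext fields not shown in this hunk):

    webhook:
      example:
        endpoint: /example
        method: POST
        port: "12000"
        maxPayloadSize: 1048576                 # 1MB, the documented default
        filter:
          expression: body.action == "opened"   # hypothetical expression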

+ + + + + + + + + + diff --git a/api/jsonschema/schema.json b/api/jsonschema/schema.json new file mode 100644 index 0000000000..787f7a41c9 --- /dev/null +++ b/api/jsonschema/schema.json @@ -0,0 +1,22280 @@ +{ + "$id": "http://events.argoproj.io/events.json", + "$schema": "http://json-schema.org/schema#", + "definitions": { + "io.argoproj.common.Amount": { + "description": "Amount represent a numeric amount.", + "type": "number" + }, + "io.argoproj.common.Backoff": { + "description": "Backoff for an operation", + "properties": { + "duration": { + "$ref": "#/definitions/io.argoproj.common.Int64OrString", + "description": "The initial duration in nanoseconds or strings like \"1s\", \"3m\"" + }, + "factor": { + "$ref": "#/definitions/io.argoproj.common.Amount", + "description": "Duration is multiplied by factor each iteration" + }, + "jitter": { + "$ref": "#/definitions/io.argoproj.common.Amount", + "description": "The amount of jitter applied each iteration" + }, + "steps": { + "description": "Exit with error after this many steps", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "io.argoproj.common.BasicAuth": { + "description": "BasicAuth contains the reference to K8s secrets that holds the username and password", + "properties": { + "password": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "Password refers to the Kubernetes secret that holds the password required for basic auth." + }, + "username": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "Username refers to the Kubernetes secret that holds the username required for basic auth." + } + }, + "type": "object" + }, + "io.argoproj.common.Condition": { + "description": "Condition contains details about resource state", + "properties": { + "lastTransitionTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "Last time the condition transitioned from one status to another." + }, + "message": { + "description": "Human-readable message indicating details about last transition.", + "type": "string" + }, + "reason": { + "description": "Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. 
For example, \"ImageNotFound\"", + "type": "string" + }, + "status": { + "description": "Condition status, True, False or Unknown.", + "type": "string" + }, + "type": { + "description": "Condition type.", + "type": "string" + } + }, + "required": [ + "type", + "status" + ], + "type": "object" + }, + "io.argoproj.common.Int64OrString": { + "format": "int64-or-string", + "type": [ + "integer", + "string" + ] + }, + "io.argoproj.common.Metadata": { + "description": "Metadata holds the annotations and labels of an event source pod", + "properties": { + "annotations": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + } + }, + "type": "object" + }, + "io.argoproj.common.Resource": { + "description": "Resource represent arbitrary structured data.", + "type": "object" + }, + "io.argoproj.common.S3Artifact": { + "description": "S3Artifact contains information about an S3 connection and bucket", + "properties": { + "accessKey": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + }, + "bucket": { + "$ref": "#/definitions/io.argoproj.common.S3Bucket" + }, + "caCertificate": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + }, + "endpoint": { + "type": "string" + }, + "events": { + "items": { + "type": "string" + }, + "type": "array" + }, + "filter": { + "$ref": "#/definitions/io.argoproj.common.S3Filter" + }, + "insecure": { + "type": "boolean" + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "type": "object" + }, + "region": { + "type": "string" + }, + "secretKey": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + } + }, + "required": [ + "endpoint", + "bucket", + "accessKey", + "secretKey" + ], + "type": "object" + }, + "io.argoproj.common.S3Bucket": { + "description": "S3Bucket contains information to describe an S3 Bucket", + "properties": { + "key": { + "type": "string" + }, + "name": { + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "io.argoproj.common.S3Filter": { + "description": "S3Filter represents filters to apply to bucket notifications for specifying constraints on objects", + "properties": { + "prefix": { + "type": "string" + }, + "suffix": { + "type": "string" + } + }, + "required": [ + "prefix", + "suffix" + ], + "type": "object" + }, + "io.argoproj.common.SASLConfig": { + "description": "SASLConfig refers to SASL configuration for a client", + "properties": { + "mechanism": { + "description": "SASLMechanism is the name of the enabled SASL mechanism. 
Possible values: OAUTHBEARER, PLAIN (defaults to PLAIN).", + "type": "string" + }, + "passwordSecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "Password for SASL/PLAIN authentication" + }, + "userSecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "User is the authentication identity (authcid) to present for SASL/PLAIN or SASL/SCRAM authentication" + } + }, + "type": "object" + }, + "io.argoproj.common.SchemaRegistryConfig": { + "description": "SchemaRegistryConfig refers to configuration for a client", + "properties": { + "auth": { + "$ref": "#/definitions/io.argoproj.common.BasicAuth", + "description": "SchemaRegistry - basic authentication" + }, + "schemaId": { + "description": "Schema ID", + "format": "int32", + "type": "integer" + }, + "url": { + "description": "Schema Registry URL.", + "type": "string" + } + }, + "required": [ + "url", + "schemaId" + ], + "type": "object" + }, + "io.argoproj.common.SecureHeader": { + "description": "SecureHeader refers to HTTP Headers with auth tokens as values", + "properties": { + "name": { + "type": "string" + }, + "valueFrom": { + "$ref": "#/definitions/io.argoproj.common.ValueFromSource", + "description": "Values can be read from either secrets or configmaps" + } + }, + "type": "object" + }, + "io.argoproj.common.Status": { + "description": "Status is a common structure which can be used for Status field.", + "properties": { + "conditions": { + "description": "Conditions are the latest available observations of a resource's current state.", + "items": { + "$ref": "#/definitions/io.argoproj.common.Condition" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + } + }, + "type": "object" + }, + "io.argoproj.common.TLSConfig": { + "description": "TLSConfig refers to TLS configuration for a client.", + "properties": { + "caCertSecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "CACertSecret refers to the secret that contains the CA cert" + }, + "clientCertSecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "ClientCertSecret refers to the secret that contains the client cert" + }, + "clientKeySecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "ClientKeySecret refers to the secret that contains the client key" + }, + "insecureSkipVerify": { + "description": "If true, skips creation of TLSConfig with certs and creates an empty TLSConfig. 
(Defaults to false)", + "type": "boolean" + } + }, + "type": "object" + }, + "io.argoproj.common.ValueFromSource": { + "description": "ValueFromSource allows you to reference keys from either a Configmap or Secret", + "properties": { + "configMapKeyRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.ConfigMapKeySelector" + }, + "secretKeyRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + } + }, + "type": "object" + }, + "io.argoproj.eventbus.v1alpha1.BusConfig": { + "description": "BusConfig has the finalized configuration for EventBus", + "properties": { + "jetstream": { + "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.JetStreamConfig" + }, + "kafka": { + "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.KafkaBus" + }, + "nats": { + "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.NATSConfig" + } + }, + "type": "object" + }, + "io.argoproj.eventbus.v1alpha1.ContainerTemplate": { + "description": "ContainerTemplate defines customized spec for a container", + "properties": { + "imagePullPolicy": { + "type": "string" + }, + "resources": { + "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements" + }, + "securityContext": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecurityContext" + } + }, + "type": "object" + }, + "io.argoproj.eventbus.v1alpha1.EventBus": { + "description": "EventBus is the definition of a eventbus resource", + "properties": { + "apiVersion": { + "const": "argoproj.io/v1alpha1", + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "const": "EventBus", + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.EventBusSpec" + }, + "status": { + "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.EventBusStatus" + } + }, + "required": [ + "metadata", + "spec" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "argoproj.io", + "kind": "EventBus", + "version": "v1alpha1" + } + ] + }, + "io.argoproj.eventbus.v1alpha1.EventBusList": { + "description": "EventBusList is the list of eventbus resources", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "items": { + "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.EventBus" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "required": [ + "metadata", + "items" + ], + "type": "object" + }, + "io.argoproj.eventbus.v1alpha1.EventBusSpec": { + "description": "EventBusSpec refers to specification of eventbus resource", + "properties": { + "jetstream": { + "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.JetStreamBus" + }, + "jetstreamExotic": { + "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.JetStreamConfig", + "description": "Exotic JetStream" + }, + "kafka": { + "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.KafkaBus", + "description": "Kafka eventbus" + }, + "nats": { + "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.NATSBus", + "description": "NATS eventbus" + } + }, + "type": "object" + }, + "io.argoproj.eventbus.v1alpha1.EventBusStatus": { + "description": "EventBusStatus holds the status of the eventbus resource", + "properties": { + "conditions": { + "description": "Conditions are the latest available observations of a resource's current state.", + "items": { + "$ref": "#/definitions/io.argoproj.common.Condition" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "config": { + "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.BusConfig", + "description": "Config holds the fininalized configuration of EventBus" + } + }, + "type": "object" + }, + "io.argoproj.eventbus.v1alpha1.JetStreamBus": { + "description": "JetStreamBus holds the JetStream EventBus information", + "properties": { + "affinity": { + "$ref": "#/definitions/io.k8s.api.core.v1.Affinity", + "description": "The pod's scheduling constraints More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/" + }, + "containerTemplate": { + "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.ContainerTemplate", + "description": "ContainerTemplate contains customized spec for Nats JetStream container" + }, + "imagePullSecrets": { + "description": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "maxPayload": { + "description": "Maximum number of bytes in a message payload, 0 means unlimited. Defaults to 1MB", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.argoproj.common.Metadata", + "description": "Metadata sets the pods's metadata, i.e. annotations and labels" + }, + "metricsContainerTemplate": { + "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.ContainerTemplate", + "description": "MetricsContainerTemplate contains customized spec for metrics container" + }, + "nodeSelector": { + "additionalProperties": { + "type": "string" + }, + "description": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. 
More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", + "type": "object" + }, + "persistence": { + "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.PersistenceStrategy" + }, + "priority": { + "description": "The priority value. Various system components use this field to find the priority of the Redis pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/", + "format": "int32", + "type": "integer" + }, + "priorityClassName": { + "description": "If specified, indicates the Redis pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/", + "type": "string" + }, + "reloaderContainerTemplate": { + "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.ContainerTemplate", + "description": "ReloaderContainerTemplate contains customized spec for config reloader container" + }, + "replicas": { + "description": "JetStream StatefulSet size", + "format": "int32", + "type": "integer" + }, + "securityContext": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodSecurityContext", + "description": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field." + }, + "serviceAccountName": { + "description": "ServiceAccountName to apply to the StatefulSet", + "type": "string" + }, + "settings": { + "description": "JetStream configuration, if not specified, global settings in controller-config will be used. See https://docs.nats.io/running-a-nats-service/configuration#jetstream. Only configure \"max_memory_store\" or \"max_file_store\", do not set \"store_dir\" as it has been hardcoded.", + "type": "string" + }, + "startArgs": { + "description": "Optional arguments to start nats-server. For example, \"-D\" to enable debugging output, \"-DV\" to enable debugging and tracing. Check https://docs.nats.io/ for all the available arguments.", + "items": { + "type": "string" + }, + "type": "array" + }, + "streamConfig": { + "description": "Optional configuration for the streams to be created in this JetStream service, if specified, it will be merged with the default configuration in controller-config. It accepts a YAML format configuration, available fields include, \"maxBytes\", \"maxMsgs\", \"maxAge\" (e.g. 72h), \"replicas\" (1, 3, 5), \"duplicates\" (e.g. 
5m).", + "type": "string" + }, + "tolerations": { + "description": "If specified, the pod's tolerations.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Toleration" + }, + "type": "array" + }, + "version": { + "description": "JetStream version, such as \"2.7.3\"", + "type": "string" + } + }, + "type": "object" + }, + "io.argoproj.eventbus.v1alpha1.JetStreamConfig": { + "properties": { + "accessSecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "Secret for auth" + }, + "streamConfig": { + "type": "string" + }, + "url": { + "description": "JetStream (Nats) URL", + "type": "string" + } + }, + "type": "object" + }, + "io.argoproj.eventbus.v1alpha1.KafkaBus": { + "description": "KafkaBus holds the KafkaBus EventBus information", + "properties": { + "consumerGroup": { + "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.KafkaConsumerGroup", + "description": "Consumer group for kafka client" + }, + "sasl": { + "$ref": "#/definitions/io.argoproj.common.SASLConfig", + "description": "SASL configuration for the kafka client" + }, + "tls": { + "$ref": "#/definitions/io.argoproj.common.TLSConfig", + "description": "TLS configuration for the kafka client." + }, + "topic": { + "description": "Topic name, defaults to {namespace_name}-{eventbus_name}", + "type": "string" + }, + "url": { + "description": "URL to kafka cluster, multiple URLs separated by comma", + "type": "string" + }, + "version": { + "description": "Kafka version, sarama defaults to the oldest supported stable version", + "type": "string" + } + }, + "type": "object" + }, + "io.argoproj.eventbus.v1alpha1.KafkaConsumerGroup": { + "properties": { + "groupName": { + "description": "Consumer group name, defaults to {namespace_name}-{sensor_name}", + "type": "string" + }, + "rebalanceStrategy": { + "description": "Rebalance strategy can be one of: sticky, roundrobin, range. 
Range is the default.", + "type": "string" + }, + "startOldest": { + "description": "When starting up a new group do we want to start from the oldest event (true) or the newest event (false), defaults to false", + "type": "boolean" + } + }, + "type": "object" + }, + "io.argoproj.eventbus.v1alpha1.NATSBus": { + "description": "NATSBus holds the NATS eventbus information", + "properties": { + "exotic": { + "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.NATSConfig", + "description": "Exotic holds an exotic NATS config" + }, + "native": { + "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.NativeStrategy", + "description": "Native means to bring up a native NATS service" + } + }, + "type": "object" + }, + "io.argoproj.eventbus.v1alpha1.NATSConfig": { + "description": "NATSConfig holds the config of NATS", + "properties": { + "accessSecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "Secret for auth" + }, + "auth": { + "description": "Auth strategy, default to AuthStrategyNone", + "type": "string" + }, + "clusterID": { + "description": "Cluster ID for nats streaming", + "type": "string" + }, + "url": { + "description": "NATS streaming url", + "type": "string" + } + }, + "type": "object" + }, + "io.argoproj.eventbus.v1alpha1.NativeStrategy": { + "description": "NativeStrategy indicates to install a native NATS service", + "properties": { + "affinity": { + "$ref": "#/definitions/io.k8s.api.core.v1.Affinity", + "description": "The pod's scheduling constraints More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/" + }, + "auth": { + "type": "string" + }, + "containerTemplate": { + "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.ContainerTemplate", + "description": "ContainerTemplate contains customized spec for NATS container" + }, + "imagePullSecrets": { + "description": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "maxAge": { + "description": "Max Age of existing messages, i.e. \"72h\", “4h35m”", + "type": "string" + }, + "maxBytes": { + "description": "Total size of messages per channel, 0 means unlimited. Defaults to 1GB", + "type": "string" + }, + "maxMsgs": { + "description": "Maximum number of messages per channel, 0 means unlimited. Defaults to 1000000", + "format": "int64", + "type": "integer" + }, + "maxPayload": { + "description": "Maximum number of bytes in a message payload, 0 means unlimited. Defaults to 1MB", + "type": "string" + }, + "maxSubs": { + "description": "Maximum number of subscriptions per channel, 0 means unlimited. Defaults to 1000", + "format": "int64", + "type": "integer" + }, + "metadata": { + "$ref": "#/definitions/io.argoproj.common.Metadata", + "description": "Metadata sets the pods's metadata, i.e. 
annotations and labels" + }, + "metricsContainerTemplate": { + "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.ContainerTemplate", + "description": "MetricsContainerTemplate contains customized spec for metrics container" + }, + "nodeSelector": { + "additionalProperties": { + "type": "string" + }, + "description": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", + "type": "object" + }, + "persistence": { + "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.PersistenceStrategy" + }, + "priority": { + "description": "The priority value. Various system components use this field to find the priority of the EventSource pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/", + "format": "int32", + "type": "integer" + }, + "priorityClassName": { + "description": "If specified, indicates the EventSource pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/", + "type": "string" + }, + "raftCommitTimeout": { + "description": "Specifies the time without an Apply() operation before sending an heartbeat to ensure timely commit, i.e. \"72h\", “4h35m”. Defaults to 100ms", + "type": "string" + }, + "raftElectionTimeout": { + "description": "Specifies the time in candidate state without a leader before attempting an election, i.e. \"72h\", “4h35m”. Defaults to 2s", + "type": "string" + }, + "raftHeartbeatTimeout": { + "description": "Specifies the time in follower state without a leader before attempting an election, i.e. \"72h\", “4h35m”. Defaults to 2s", + "type": "string" + }, + "raftLeaseTimeout": { + "description": "Specifies how long a leader waits without being able to contact a quorum of nodes before stepping down as leader, i.e. \"72h\", “4h35m”. Defaults to 1s", + "type": "string" + }, + "replicas": { + "description": "Size is the NATS StatefulSet size", + "format": "int32", + "type": "integer" + }, + "securityContext": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodSecurityContext", + "description": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field." 
+ }, + "serviceAccountName": { + "description": "ServiceAccountName to apply to NATS StatefulSet", + "type": "string" + }, + "tolerations": { + "description": "If specified, the pod's tolerations.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Toleration" + }, + "type": "array" + } + }, + "type": "object" + }, + "io.argoproj.eventbus.v1alpha1.PersistenceStrategy": { + "description": "PersistenceStrategy defines the strategy of persistence", + "properties": { + "accessMode": { + "description": "Available access modes such as ReadWriteOnce, ReadWriteMany https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes", + "type": "string" + }, + "storageClassName": { + "description": "Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", + "type": "string" + }, + "volumeSize": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity", + "description": "Volume size, e.g. 10Gi" + } + }, + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.AMQPConsumeConfig": { + "description": "AMQPConsumeConfig holds the configuration to immediately starts delivering queued messages", + "properties": { + "autoAck": { + "description": "AutoAck when true, the server will acknowledge deliveries to this consumer prior to writing the delivery to the network", + "type": "boolean" + }, + "consumerTag": { + "description": "ConsumerTag is the identity of the consumer included in every delivery", + "type": "string" + }, + "exclusive": { + "description": "Exclusive when true, the server will ensure that this is the sole consumer from this queue", + "type": "boolean" + }, + "noLocal": { + "description": "NoLocal flag is not supported by RabbitMQ", + "type": "boolean" + }, + "noWait": { + "description": "NowWait when true, do not wait for the server to confirm the request and immediately begin deliveries", + "type": "boolean" + } + }, + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.AMQPEventSource": { + "description": "AMQPEventSource refers to an event-source for AMQP stream events", + "properties": { + "auth": { + "$ref": "#/definitions/io.argoproj.common.BasicAuth", + "description": "Auth hosts secret selectors for username and password" + }, + "connectionBackoff": { + "$ref": "#/definitions/io.argoproj.common.Backoff", + "description": "Backoff holds parameters applied to connection." 
+ }, + "consume": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.AMQPConsumeConfig", + "description": "Consume holds the configuration to immediately starts delivering queued messages For more information, visit https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.Consume" + }, + "exchangeDeclare": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.AMQPExchangeDeclareConfig", + "description": "ExchangeDeclare holds the configuration for the exchange on the server For more information, visit https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.ExchangeDeclare" + }, + "exchangeName": { + "description": "ExchangeName is the exchange name For more information, visit https://www.rabbitmq.com/tutorials/amqp-concepts.html", + "type": "string" + }, + "exchangeType": { + "description": "ExchangeType is rabbitmq exchange type", + "type": "string" + }, + "filter": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter", + "description": "Filter" + }, + "jsonBody": { + "description": "JSONBody specifies that all event body payload coming from this source will be JSON", + "type": "boolean" + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata holds the user defined metadata which will passed along the event payload.", + "type": "object" + }, + "queueBind": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.AMQPQueueBindConfig", + "description": "QueueBind holds the configuration that binds an exchange to a queue so that publishings to the exchange will be routed to the queue when the publishing routing key matches the binding routing key For more information, visit https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.QueueBind" + }, + "queueDeclare": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.AMQPQueueDeclareConfig", + "description": "QueueDeclare holds the configuration of a queue to hold messages and deliver to consumers. Declaring creates a queue if it doesn't already exist, or ensures that an existing queue matches the same parameters For more information, visit https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.QueueDeclare" + }, + "routingKey": { + "description": "Routing key for bindings", + "type": "string" + }, + "tls": { + "$ref": "#/definitions/io.argoproj.common.TLSConfig", + "description": "TLS configuration for the amqp client." 
+ }, + "url": { + "description": "URL for rabbitmq service", + "type": "string" + }, + "urlSecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "URLSecret is secret reference for rabbitmq service URL" + } + }, + "required": [ + "exchangeName", + "exchangeType", + "routingKey" + ], + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.AMQPExchangeDeclareConfig": { + "description": "AMQPExchangeDeclareConfig holds the configuration for the exchange on the server", + "properties": { + "autoDelete": { + "description": "AutoDelete removes the exchange when no bindings are active", + "type": "boolean" + }, + "durable": { + "description": "Durable keeps the exchange also after the server restarts", + "type": "boolean" + }, + "internal": { + "description": "Internal when true does not accept publishings", + "type": "boolean" + }, + "noWait": { + "description": "NowWait when true does not wait for a confirmation from the server", + "type": "boolean" + } + }, + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.AMQPQueueBindConfig": { + "description": "AMQPQueueBindConfig holds the configuration that binds an exchange to a queue so that publishings to the exchange will be routed to the queue when the publishing routing key matches the binding routing key", + "properties": { + "noWait": { + "description": "NowWait false and the queue could not be bound, the channel will be closed with an error", + "type": "boolean" + } + }, + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.AMQPQueueDeclareConfig": { + "description": "AMQPQueueDeclareConfig holds the configuration of a queue to hold messages and deliver to consumers. Declaring creates a queue if it doesn't already exist, or ensures that an existing queue matches the same parameters", + "properties": { + "arguments": { + "description": "Arguments of a queue (also known as \"x-arguments\") used for optional features and plugins", + "type": "string" + }, + "autoDelete": { + "description": "AutoDelete removes the queue when no consumers are active", + "type": "boolean" + }, + "durable": { + "description": "Durable keeps the queue also after the server restarts", + "type": "boolean" + }, + "exclusive": { + "description": "Exclusive sets the queues to be accessible only by the connection that declares them and will be deleted wgen the connection closes", + "type": "boolean" + }, + "name": { + "description": "Name of the queue. 
If empty the server auto-generates a unique name for this queue", + "type": "string" + }, + "noWait": { + "description": "NoWait when true, the queue is assumed to be declared on the server", + "type": "boolean" + } + }, + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.AzureEventsHubEventSource": { + "description": "AzureEventsHubEventSource describes the event source for azure events hub. More info at https://docs.microsoft.com/en-us/azure/event-hubs/", + "properties": { + "filter": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter", + "description": "Filter" + }, + "fqdn": { + "description": "FQDN of the EventHubs namespace you created. More info at https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-get-connection-string", + "type": "string" + }, + "hubName": { + "description": "Event Hub path/name", + "type": "string" + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata holds the user defined metadata which will be passed along the event payload.", + "type": "object" + }, + "sharedAccessKey": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "SharedAccessKey is the generated value of the key" + }, + "sharedAccessKeyName": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "SharedAccessKeyName is the name you chose for your application's SAS keys" + } + }, + "required": [ + "fqdn", + "hubName" + ], + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.AzureQueueStorageEventSource": { + "description": "AzureQueueStorageEventSource describes the event source for azure queue storage. More info at https://learn.microsoft.com/en-us/azure/storage/queues/", + "properties": { + "connectionString": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "ConnectionString is the connection string to access Azure Queue Storage. If this field is not provided it will try to access via Azure AD with StorageAccountName." + }, + "decodeMessage": { + "description": "DecodeMessage specifies if all the messages should be base64 decoded. If set to true the decoding is done before the evaluation of JSONBody", + "type": "boolean" + }, + "dlq": { + "description": "DLQ specifies if a dead-letter queue is configured for messages that can't be processed successfully. If set to true, messages with invalid payload won't be acknowledged so that they can be forwarded to the dead-letter queue. The default value is false.", + "type": "boolean" + }, + "filter": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter", + "description": "Filter" + }, + "jsonBody": { + "description": "JSONBody specifies that all event body payload coming from this source will be JSON", + "type": "boolean" + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata holds the user defined metadata which will be passed along the event payload.", + "type": "object" + }, + "queueName": { + "description": "QueueName is the name of the queue", + "type": "string" + }, + "storageAccountName": { + "description": "StorageAccountName is the name of the storage account where the queue is. This field is necessary to access via Azure AD (managed identity) and it is ignored if ConnectionString is set.", + "type": "string" + }, + "waitTimeInSeconds": { + "description": "WaitTimeInSeconds is the duration (in seconds) for which the event source waits between empty results from the queue. 
The default value is 3 seconds.", + "format": "int32", + "type": "integer" + } + }, + "required": [ + "queueName" + ], + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.AzureServiceBusEventSource": { + "description": "AzureServiceBusEventSource describes the event source for azure service bus. More info at https://docs.microsoft.com/en-us/azure/service-bus-messaging/", + "properties": { + "connectionString": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "ConnectionString is the connection string for the Azure Service Bus. If this field is not provided it will try to access via Azure AD with DefaultAzureCredential and FullyQualifiedNamespace." + }, + "filter": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter", + "description": "Filter" + }, + "fullyQualifiedNamespace": { + "description": "FullyQualifiedNamespace is the Service Bus namespace name (ex: myservicebus.servicebus.windows.net). This field is necessary to access via Azure AD (managed identity) and it is ignored if ConnectionString is set.", + "type": "string" + }, + "jsonBody": { + "description": "JSONBody specifies that all event body payload coming from this source will be JSON", + "type": "boolean" + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata holds the user defined metadata which will be passed along the event payload.", + "type": "object" + }, + "queueName": { + "description": "QueueName is the name of the Azure Service Bus Queue", + "type": "string" + }, + "subscriptionName": { + "description": "SubscriptionName is the name of the Azure Service Bus Topic Subscription", + "type": "string" + }, + "tls": { + "$ref": "#/definitions/io.argoproj.common.TLSConfig", + "description": "TLS configuration for the service bus client" + }, + "topicName": { + "description": "TopicName is the name of the Azure Service Bus Topic", + "type": "string" + } + }, + "required": [ + "queueName", + "topicName", + "subscriptionName" + ], + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.BitbucketAuth": { + "description": "BitbucketAuth holds the different auth strategies for connecting to Bitbucket", + "properties": { + "basic": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.BitbucketBasicAuth", + "description": "Basic is the BasicAuth auth strategy." + }, + "oauthToken": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "OAuthToken refers to the K8s secret that holds the OAuth Bearer token." + } + }, + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.BitbucketBasicAuth": { + "description": "BasicAuth holds the information required to authenticate a user via the basic auth mechanism", + "properties": { + "password": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "Password refers to the K8s secret that holds the password." + }, + "username": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "Username refers to the K8s secret that holds the username." + } + }, + "required": [ + "username", + "password" + ], + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.BitbucketEventSource": { + "description": "BitbucketEventSource describes the event source for Bitbucket", + "properties": { + "auth": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.BitbucketAuth", + "description": "Auth information required to connect to Bitbucket."
+ }, + "deleteHookOnFinish": { + "description": "DeleteHookOnFinish determines whether to delete the defined Bitbucket hook once the event source is stopped.", + "type": "boolean" + }, + "events": { + "description": "Events this webhook is subscribed to.", + "items": { + "type": "string" + }, + "type": "array" + }, + "filter": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter", + "description": "Filter" + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata holds the user defined metadata which will be passed along the event payload.", + "type": "object" + }, + "owner": { + "description": "DeprecatedOwner is the owner of the repository. Deprecated: use Repositories instead. Will be unsupported in v1.9", + "type": "string" + }, + "projectKey": { + "description": "DeprecatedProjectKey is the key of the project to which the repository relates Deprecated: use Repositories instead. Will be unsupported in v1.9", + "type": "string" + }, + "repositories": { + "description": "Repositories holds a list of repositories for which integration needs to set up", + "items": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.BitbucketRepository" + }, + "type": "array" + }, + "repositorySlug": { + "description": "DeprecatedRepositorySlug is a URL-friendly version of a repository name, automatically generated by Bitbucket for use in the URL Deprecated: use Repositories instead. Will be unsupported in v1.9", + "type": "string" + }, + "webhook": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.WebhookContext", + "description": "Webhook refers to the configuration required to run an http server" + } + }, + "required": [ + "webhook", + "auth", + "events" + ], + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.BitbucketRepository": { + "properties": { + "owner": { + "description": "Owner is the owner of the repository", + "type": "string" + }, + "repositorySlug": { + "description": "RepositorySlug is a URL-friendly version of a repository name, automatically generated by Bitbucket for use in the URL", + "type": "string" + } + }, + "required": [ + "owner", + "repositorySlug" + ], + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.BitbucketServerEventSource": { + "description": "BitbucketServerEventSource refers to event-source related to Bitbucket Server events", + "properties": { + "accessToken": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "AccessToken is reference to K8s secret which holds the bitbucket api access information." + }, + "bitbucketserverBaseURL": { + "description": "BitbucketServerBaseURL is the base URL for API requests to a custom endpoint.", + "type": "string" + }, + "checkInterval": { + "description": "CheckInterval is a duration in which to wait before checking that the webhooks exist, e.g. 1s, 30m, 2h... (defaults to 1m)", + "type": "string" + }, + "deleteHookOnFinish": { + "description": "DeleteHookOnFinish determines whether to delete the Bitbucket Server hook for the project once the event source is stopped.", + "type": "boolean" + }, + "events": { + "description": "Events are bitbucket event to listen to. 
Refer https://confluence.atlassian.com/bitbucketserver/event-payload-938025882.html", + "items": { + "type": "string" + }, + "type": "array" + }, + "filter": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter", + "description": "Filter" + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata holds the user defined metadata which will be passed along the event payload.", + "type": "object" + }, + "projectKey": { + "description": "DeprecatedProjectKey is the key of the project for which integration needs to be set up. Deprecated: use Repositories instead. Will be unsupported in v1.8.", + "type": "string" + }, + "projects": { + "description": "Projects holds a list of projects for which integration needs to be set up; this will add the webhook to all repositories in the project.", + "items": { + "type": "string" + }, + "type": "array" + }, + "repositories": { + "description": "Repositories holds a list of repositories for which integration needs to be set up.", + "items": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.BitbucketServerRepository" + }, + "type": "array" + }, + "repositorySlug": { + "description": "DeprecatedRepositorySlug is the slug of the repository for which integration needs to be set up. Deprecated: use Repositories instead. Will be unsupported in v1.8.", + "type": "string" + }, + "skipBranchRefsChangedOnOpenPR": { + "description": "SkipBranchRefsChangedOnOpenPR bypasses the event repo:refs_changed for branches whenever there's an associated open pull request. This helps in optimizing the event handling process by avoiding unnecessary triggers for branch reference changes that are already part of a pull request under review.", + "type": "boolean" + }, + "tls": { + "$ref": "#/definitions/io.argoproj.common.TLSConfig", + "description": "TLS configuration for the bitbucketserver client." + }, + "webhook": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.WebhookContext", + "description": "Webhook holds configuration to run an http server." + }, + "webhookSecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "WebhookSecret is a reference to the K8s secret which holds the bitbucket webhook secret (for HMAC validation)." + } + }, + "required": [ + "bitbucketserverBaseURL" + ], + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.BitbucketServerRepository": { + "properties": { + "projectKey": { + "description": "ProjectKey is the key of the project for which integration needs to be set up.", + "type": "string" + }, + "repositorySlug": { + "description": "RepositorySlug is the slug of the repository for which integration needs to be set up.", + "type": "string" + } + }, + "required": [ + "projectKey", + "repositorySlug" + ], + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.CalendarEventSource": { + "description": "CalendarEventSource describes a time based dependency. One of the fields (schedule, interval, or recurrence) must be passed. Schedule takes precedence over interval; interval takes precedence over recurrence", + "properties": { + "exclusionDates": { + "description": "ExclusionDates defines the list of DATE-TIME exceptions for recurring events.", + "items": { + "type": "string" + }, + "type": "array" + }, + "filter": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter", + "description": "Filter" + }, + "interval": { + "description": "Interval is a string that describes an interval duration, e.g. 
1s, 30m, 2h...", + "type": "string" + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata holds the user defined metadata which will be passed along the event payload.", + "type": "object" + }, + "persistence": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventPersistence", + "description": "Persistence holds the configuration for event persistence" + }, + "schedule": { + "description": "Schedule is a cron-like expression. For reference, see: https://en.wikipedia.org/wiki/Cron", + "type": "string" + }, + "timezone": { + "description": "Timezone in which to run the schedule", + "type": "string" + } + }, + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.CatchupConfiguration": { + "properties": { + "enabled": { + "description": "Enabled enables triggering the missed schedules when the eventsource restarts", + "type": "boolean" + }, + "maxDuration": { + "description": "MaxDuration holds the max catchup duration", + "type": "string" + } + }, + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.ConfigMapPersistence": { + "properties": { + "createIfNotExist": { + "description": "CreateIfNotExist will create the configmap if it doesn't exist", + "type": "boolean" + }, + "name": { + "description": "Name of the configmap", + "type": "string" + } + }, + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.EmitterEventSource": { + "description": "EmitterEventSource describes the event source for emitter. More info at https://emitter.io/develop/getting-started/", + "properties": { + "broker": { + "description": "Broker URI to connect to.", + "type": "string" + }, + "channelKey": { + "description": "ChannelKey refers to the channel key", + "type": "string" + }, + "channelName": { + "description": "ChannelName refers to the channel name", + "type": "string" + }, + "connectionBackoff": { + "$ref": "#/definitions/io.argoproj.common.Backoff", + "description": "Backoff holds parameters applied to connection." + }, + "filter": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter", + "description": "Filter" + }, + "jsonBody": { + "description": "JSONBody specifies that all event body payload coming from this source will be JSON", + "type": "boolean" + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata holds the user defined metadata which will be passed along the event payload.", + "type": "object" + }, + "password": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "Password to use to connect to broker" + }, + "tls": { + "$ref": "#/definitions/io.argoproj.common.TLSConfig", + "description": "TLS configuration for the emitter client."
+ }, + "username": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "Username to use to connect to broker" + } + }, + "required": [ + "broker", + "channelKey", + "channelName" + ], + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.EventPersistence": { + "properties": { + "catchup": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.CatchupConfiguration", + "description": "Catchup enables to triggered the missed schedule when eventsource restarts" + }, + "configMap": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.ConfigMapPersistence", + "description": "ConfigMap holds configmap details for persistence" + } + }, + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.EventSource": { + "description": "EventSource is the definition of a eventsource resource", + "properties": { + "apiVersion": { + "const": "argoproj.io/v1alpha1", + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "const": "EventSource", + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceSpec" + }, + "status": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceStatus" + } + }, + "required": [ + "metadata", + "spec" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "argoproj.io", + "kind": "EventSource", + "version": "v1alpha1" + } + ] + }, + "io.argoproj.eventsource.v1alpha1.EventSourceFilter": { + "properties": { + "expression": { + "type": "string" + } + }, + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.EventSourceList": { + "description": "EventSourceList is the list of eventsource resources", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "items": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSource" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "required": [ + "metadata", + "items" + ], + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.EventSourceSpec": { + "description": "EventSourceSpec refers to the specification of the event-source resource", + "properties": { + "amqp": { + "additionalProperties": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.AMQPEventSource" + }, + "description": "AMQP event sources", + "type": "object" + }, + "azureEventsHub": { + "additionalProperties": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.AzureEventsHubEventSource" + }, + "description": "AzureEventsHub event sources", + "type": "object" + }, + "azureQueueStorage": { + "additionalProperties": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.AzureQueueStorageEventSource" + }, + "description": "AzureQueueStorage event source", + "type": "object" + }, + "azureServiceBus": { + "additionalProperties": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.AzureServiceBusEventSource" + }, + "description": "Azure Service Bus event source", + "type": "object" + }, + "bitbucket": { + "additionalProperties": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.BitbucketEventSource" + }, + "description": "Bitbucket event sources", + "type": "object" + }, + "bitbucketserver": { + "additionalProperties": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.BitbucketServerEventSource" + }, + "description": "Bitbucket Server event sources", + "type": "object" + }, + "calendar": { + "additionalProperties": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.CalendarEventSource" + }, + "description": "Calendar event sources", + "type": "object" + }, + "emitter": { + "additionalProperties": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EmitterEventSource" + }, + "description": "Emitter event source", + "type": "object" + }, + "eventBusName": { + "description": "EventBusName references an EventBus name. 
By default the value is \"default\"", + "type": "string" + }, + "file": { + "additionalProperties": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.FileEventSource" + }, + "description": "File event sources", + "type": "object" + }, + "generic": { + "additionalProperties": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.GenericEventSource" + }, + "description": "Generic event source", + "type": "object" + }, + "gerrit": { + "additionalProperties": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.GerritEventSource" + }, + "description": "Gerrit event source", + "type": "object" + }, + "github": { + "additionalProperties": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.GithubEventSource" + }, + "description": "Github event sources", + "type": "object" + }, + "gitlab": { + "additionalProperties": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.GitlabEventSource" + }, + "description": "Gitlab event sources", + "type": "object" + }, + "hdfs": { + "additionalProperties": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.HDFSEventSource" + }, + "description": "HDFS event sources", + "type": "object" + }, + "kafka": { + "additionalProperties": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.KafkaEventSource" + }, + "description": "Kafka event sources", + "type": "object" + }, + "minio": { + "additionalProperties": { + "$ref": "#/definitions/io.argoproj.common.S3Artifact" + }, + "description": "Minio event sources", + "type": "object" + }, + "mqtt": { + "additionalProperties": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.MQTTEventSource" + }, + "description": "MQTT event sources", + "type": "object" + }, + "nats": { + "additionalProperties": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.NATSEventsSource" + }, + "description": "NATS event sources", + "type": "object" + }, + "nsq": { + "additionalProperties": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.NSQEventSource" + }, + "description": "NSQ event source", + "type": "object" + }, + "pubSub": { + "additionalProperties": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.PubSubEventSource" + }, + "description": "PubSub event sources", + "type": "object" + }, + "pulsar": { + "additionalProperties": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.PulsarEventSource" + }, + "description": "Pulsar event source", + "type": "object" + }, + "redis": { + "additionalProperties": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.RedisEventSource" + }, + "description": "Redis event source", + "type": "object" + }, + "redisStream": { + "additionalProperties": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.RedisStreamEventSource" + }, + "description": "Redis stream source", + "type": "object" + }, + "replicas": { + "description": "Replicas is the event source deployment replicas", + "format": "int32", + "type": "integer" + }, + "resource": { + "additionalProperties": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.ResourceEventSource" + }, + "description": "Resource event sources", + "type": "object" + }, + "service": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.Service", + "description": "Service is the specification of the service to expose the event source" + }, + "sftp": { + "additionalProperties": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.SFTPEventSource" + }, + "description": "SFTP event sources", + "type": "object" + }, + "slack": { + 
"additionalProperties": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.SlackEventSource" + }, + "description": "Slack event sources", + "type": "object" + }, + "sns": { + "additionalProperties": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.SNSEventSource" + }, + "description": "SNS event sources", + "type": "object" + }, + "sqs": { + "additionalProperties": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.SQSEventSource" + }, + "description": "SQS event sources", + "type": "object" + }, + "storageGrid": { + "additionalProperties": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.StorageGridEventSource" + }, + "description": "StorageGrid event sources", + "type": "object" + }, + "stripe": { + "additionalProperties": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.StripeEventSource" + }, + "description": "Stripe event sources", + "type": "object" + }, + "template": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.Template", + "description": "Template is the pod specification for the event source" + }, + "webhook": { + "additionalProperties": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.WebhookEventSource" + }, + "description": "Webhook event sources", + "type": "object" + } + }, + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.EventSourceStatus": { + "description": "EventSourceStatus holds the status of the event-source resource", + "properties": { + "conditions": { + "description": "Conditions are the latest available observations of a resource's current state.", + "items": { + "$ref": "#/definitions/io.argoproj.common.Condition" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + } + }, + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.FileEventSource": { + "description": "FileEventSource describes an event-source for file related events.", + "properties": { + "eventType": { + "description": "Type of file operations to watch Refer https://github.com/fsnotify/fsnotify/blob/master/fsnotify.go for more information", + "type": "string" + }, + "filter": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter", + "description": "Filter" + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata holds the user defined metadata which will passed along the event payload.", + "type": "object" + }, + "polling": { + "description": "Use polling instead of inotify", + "type": "boolean" + }, + "watchPathConfig": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.WatchPathConfig", + "description": "WatchPathConfig contains configuration about the file path to watch" + } + }, + "required": [ + "eventType", + "watchPathConfig" + ], + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.GenericEventSource": { + "description": "GenericEventSource refers to a generic event source. 
It can be used to implement a custom event source.", + "properties": { + "authSecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "AuthSecret holds a secret selector that contains a bearer token for authentication" + }, + "config": { + "description": "Config is the event source configuration", + "type": "string" + }, + "filter": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter", + "description": "Filter" + }, + "insecure": { + "description": "Insecure determines the type of connection.", + "type": "boolean" + }, + "jsonBody": { + "description": "JSONBody specifies that all event body payload coming from this source will be JSON", + "type": "boolean" + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata holds the user defined metadata which will be passed along the event payload.", + "type": "object" + }, + "url": { + "description": "URL of the gRPC server that implements the event source.", + "type": "string" + } + }, + "required": [ + "url", + "config" + ], + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.GerritEventSource": { + "description": "GerritEventSource refers to event-source related to gerrit events", + "properties": { + "auth": { + "$ref": "#/definitions/io.argoproj.common.BasicAuth", + "description": "Auth holds secret selectors for username and password" + }, + "deleteHookOnFinish": { + "description": "DeleteHookOnFinish determines whether to delete the Gerrit hook for the project once the event source is stopped.", + "type": "boolean" + }, + "events": { + "description": "Events are gerrit events to listen to. Refer https://gerrit-review.googlesource.com/Documentation/cmd-stream-events.html#events", + "items": { + "type": "string" + }, + "type": "array" + }, + "filter": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter", + "description": "Filter" + }, + "gerritBaseURL": { + "description": "GerritBaseURL is the base URL for API requests to a custom endpoint", + "type": "string" + }, + "hookName": { + "description": "HookName is the name of the webhook", + "type": "string" + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata holds the user defined metadata which will be passed along the event payload.", + "type": "object" + }, + "projects": { + "description": "List of project namespace paths like \"whynowy/test\".", + "items": { + "type": "string" + }, + "type": "array" + }, + "sslVerify": { + "description": "SslVerify to enable ssl verification", + "type": "boolean" + }, + "webhook": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.WebhookContext", + "description": "Webhook holds configuration to run an http server" + } + }, + "required": [ + "hookName", + "events", + "gerritBaseURL" + ], + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.GithubAppCreds": { + "properties": { + "appID": { + "description": "AppID refers to the GitHub App ID for the application you created", + "format": "int64", + "type": "integer" + }, + "installationID": { + "description": "InstallationID refers to the Installation ID of the GitHub app you created and installed", + "format": "int64", + "type": "integer" + }, + "privateKey": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "PrivateKey refers to a K8s secret containing the GitHub app private key" + } + }, + "required": [ + "privateKey", + "appID", + "installationID" + ], + "type": "object" + }, + 
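For orientation, here is a minimal sketch of how the GithubAppCreds definition above and the GithubEventSource definition that follows compose into an EventSource manifest; the metadata name, the github-app-pem secret and its key, the numeric IDs, and the webhook endpoint/port are all hypothetical placeholders, not values prescribed by this schema.

apiVersion: argoproj.io/v1alpha1
kind: EventSource
metadata:
  name: github-app-example         # hypothetical name
spec:
  github:
    example:
      repositories:
        - owner: argoproj          # OwnedRepositories: owner plus repo names
          names:
            - argo-events
      events:                      # required by GithubEventSource
        - push
      githubApp:                   # GithubAppCreds: all three fields required
        privateKey:
          name: github-app-pem     # hypothetical K8s secret
          key: privateKey.pem
        appID: 123456              # placeholder GitHub App ID
        installationID: 7890123    # placeholder Installation ID
      webhook:
        endpoint: /push
        port: "12000"
        method: POST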
"io.argoproj.eventsource.v1alpha1.GithubEventSource": { + "description": "GithubEventSource refers to event-source for github related events", + "properties": { + "active": { + "description": "Active refers to status of the webhook for event deliveries. https://developer.github.com/webhooks/creating/#active", + "type": "boolean" + }, + "apiToken": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "APIToken refers to a K8s secret containing github api token" + }, + "contentType": { + "description": "ContentType of the event delivery", + "type": "string" + }, + "deleteHookOnFinish": { + "description": "DeleteHookOnFinish determines whether to delete the GitHub hook for the repository once the event source is stopped.", + "type": "boolean" + }, + "events": { + "description": "Events refer to Github events to which the event source will subscribe", + "items": { + "type": "string" + }, + "type": "array" + }, + "filter": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter", + "description": "Filter" + }, + "githubApp": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.GithubAppCreds", + "description": "GitHubApp holds the GitHub app credentials" + }, + "githubBaseURL": { + "description": "GitHub base URL (for GitHub Enterprise)", + "type": "string" + }, + "githubUploadURL": { + "description": "GitHub upload URL (for GitHub Enterprise)", + "type": "string" + }, + "id": { + "description": "Id is the webhook's id Deprecated: This is not used at all, will be removed in v1.6", + "format": "int64", + "type": "integer" + }, + "insecure": { + "description": "Insecure tls verification", + "type": "boolean" + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata holds the user defined metadata which will passed along the event payload.", + "type": "object" + }, + "organizations": { + "description": "Organizations holds the names of organizations (used for organization level webhooks). Not required if Repositories is set.", + "items": { + "type": "string" + }, + "type": "array" + }, + "owner": { + "description": "DeprecatedOwner refers to GitHub owner name i.e. argoproj Deprecated: use Repositories instead. Will be unsupported in v 1.6", + "type": "string" + }, + "payloadEnrichment": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.PayloadEnrichmentFlags", + "description": "PayloadEnrichment holds flags that determine whether to enrich GitHub's original payload with additional information." + }, + "repositories": { + "description": "Repositories holds the information of repositories, which uses repo owner as the key, and list of repo names as the value. Not required if Organizations is set.", + "items": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.OwnedRepositories" + }, + "type": "array" + }, + "repository": { + "description": "DeprecatedRepository refers to GitHub repo name i.e. argo-events Deprecated: use Repositories instead. 
Will be unsupported in v1.6", + "type": "string" + }, + "webhook": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.WebhookContext", + "description": "Webhook refers to the configuration required to run a http server" + }, + "webhookSecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "WebhookSecret refers to the K8s secret containing the GitHub webhook secret https://developer.github.com/webhooks/securing/" + } + }, + "required": [ + "events" + ], + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.GitlabEventSource": { + "description": "GitlabEventSource refers to event-source related to Gitlab events", + "properties": { + "accessToken": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "AccessToken references the k8s secret which holds the gitlab api access information" + }, + "deleteHookOnFinish": { + "description": "DeleteHookOnFinish determines whether to delete the GitLab hook for the project once the event source is stopped.", + "type": "boolean" + }, + "enableSSLVerification": { + "description": "EnableSSLVerification to enable ssl verification", + "type": "boolean" + }, + "events": { + "description": "Events are gitlab events to listen to. Refer https://github.com/xanzy/go-gitlab/blob/bf34eca5d13a9f4c3f501d8a97b8ac226d55e4d9/projects.go#L794.", + "items": { + "type": "string" + }, + "type": "array" + }, + "filter": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter", + "description": "Filter" + }, + "gitlabBaseURL": { + "description": "GitlabBaseURL is the base URL for API requests to a custom endpoint", + "type": "string" + }, + "groups": { + "description": "List of group IDs or group names like \"test\". Group level hooks are available in Premium and Ultimate Gitlab.", + "items": { + "type": "string" + }, + "type": "array" + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata holds the user defined metadata which will be passed along the event payload.", + "type": "object" + }, + "projectID": { + "description": "DeprecatedProjectID is the id of the project for which integration needs to be set up. Deprecated: use Projects instead. Will be unsupported in v1.7", + "type": "string" + }, + "projects": { + "description": "List of project IDs or project namespace paths like \"whynowy/test\". Projects and groups cannot be empty at the same time.", + "items": { + "type": "string" + }, + "type": "array" + }, + "secretToken": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "SecretToken references the k8s secret which holds the Secret Token used by the webhook config" + }, + "webhook": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.WebhookContext", + "description": "Webhook holds configuration to run an http server" + } + }, + "required": [ + "events", + "gitlabBaseURL" + ], + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.HDFSEventSource": { + "description": "HDFSEventSource refers to event-source for HDFS related events", + "properties": { + "addresses": { + "items": { + "type": "string" + }, + "type": "array" + }, + "checkInterval": { + "description": "CheckInterval is a string that describes an interval duration to check the directory state, e.g. 1s, 30m, 2h... 
(defaults to 1m)", + "type": "string" + }, + "directory": { + "description": "Directory to watch for events", + "type": "string" + }, + "filter": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter", + "description": "Filter" + }, + "hdfsUser": { + "description": "HDFSUser is the user to access HDFS file system. It is ignored if either ccache or keytab is used.", + "type": "string" + }, + "krbCCacheSecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "KrbCCacheSecret is the secret selector for Kerberos ccache. Either ccache or keytab can be set to use Kerberos." + }, + "krbConfigConfigMap": { + "$ref": "#/definitions/io.k8s.api.core.v1.ConfigMapKeySelector", + "description": "KrbConfig is the configmap selector for Kerberos config as string. It must be set if either ccache or keytab is used." + }, + "krbKeytabSecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "KrbKeytabSecret is the secret selector for Kerberos keytab. Either ccache or keytab can be set to use Kerberos." + }, + "krbRealm": { + "description": "KrbRealm is the Kerberos realm used with Kerberos keytab. It must be set if keytab is used.", + "type": "string" + }, + "krbServicePrincipalName": { + "description": "KrbServicePrincipalName is the principal name of the Kerberos service. It must be set if either ccache or keytab is used.", + "type": "string" + }, + "krbUsername": { + "description": "KrbUsername is the Kerberos username used with Kerberos keytab. It must be set if keytab is used.", + "type": "string" + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata holds the user defined metadata which will be passed along the event payload.", + "type": "object" + }, + "path": { + "description": "Path is the relative path of the object to watch with respect to the directory", + "type": "string" + }, + "pathRegexp": { + "description": "PathRegexp is the regexp of the relative path of the object to watch with respect to the directory", + "type": "string" + }, + "type": { + "description": "Type of file operations to watch", + "type": "string" + } + }, + "required": [ + "directory", + "type", + "addresses" + ], + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.KafkaConsumerGroup": { + "properties": { + "groupName": { + "description": "The name for the consumer group to use", + "type": "string" + }, + "oldest": { + "description": "When starting up a new group, do we want to start from the oldest event (true) or the newest event (false)? Defaults to false", + "type": "boolean" + }, + "rebalanceStrategy": { + "description": "Rebalance strategy can be one of: sticky, roundrobin, range. Range is the default.", + "type": "string" + } + }, + "required": [ + "groupName" + ], + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.KafkaEventSource": { + "description": "KafkaEventSource refers to event-source for Kafka related events", + "properties": { + "config": { + "description": "Yaml format Sarama config for Kafka connection. It follows the struct of sarama.Config. See https://github.com/IBM/sarama/blob/main/config.go e.g.\n\nconsumer:\n fetch:\n min: 1\nnet:\n MaxOpenRequests: 5", + "type": "string" + }, + "connectionBackoff": { + "$ref": "#/definitions/io.argoproj.common.Backoff", + "description": "Backoff holds parameters applied to connection."
+ }, + "consumerGroup": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.KafkaConsumerGroup", + "description": "Consumer group for kafka client" + }, + "filter": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter", + "description": "Filter" + }, + "jsonBody": { + "description": "JSONBody specifies that all event body payload coming from this source will be JSON", + "type": "boolean" + }, + "limitEventsPerSecond": { + "description": "Sets a limit on how many events get read from kafka per second.", + "format": "int64", + "type": "integer" + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata holds the user defined metadata which will passed along the event payload.", + "type": "object" + }, + "partition": { + "description": "Partition name", + "type": "string" + }, + "sasl": { + "$ref": "#/definitions/io.argoproj.common.SASLConfig", + "description": "SASL configuration for the kafka client" + }, + "tls": { + "$ref": "#/definitions/io.argoproj.common.TLSConfig", + "description": "TLS configuration for the kafka client." + }, + "topic": { + "description": "Topic name", + "type": "string" + }, + "url": { + "description": "URL to kafka cluster, multiple URLs separated by comma", + "type": "string" + }, + "version": { + "description": "Specify what kafka version is being connected to enables certain features in sarama, defaults to 1.0.0", + "type": "string" + } + }, + "required": [ + "url", + "topic" + ], + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.MQTTEventSource": { + "description": "MQTTEventSource refers to event-source for MQTT related events", + "properties": { + "auth": { + "$ref": "#/definitions/io.argoproj.common.BasicAuth", + "description": "Auth hosts secret selectors for username and password" + }, + "clientId": { + "description": "ClientID is the id of the client", + "type": "string" + }, + "connectionBackoff": { + "$ref": "#/definitions/io.argoproj.common.Backoff", + "description": "ConnectionBackoff holds backoff applied to connection." + }, + "filter": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter", + "description": "Filter" + }, + "jsonBody": { + "description": "JSONBody specifies that all event body payload coming from this source will be JSON", + "type": "boolean" + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata holds the user defined metadata which will passed along the event payload.", + "type": "object" + }, + "tls": { + "$ref": "#/definitions/io.argoproj.common.TLSConfig", + "description": "TLS configuration for the mqtt client." 
+ }, + "topic": { + "description": "Topic name", + "type": "string" + }, + "url": { + "description": "URL to connect to broker", + "type": "string" + } + }, + "required": [ + "url", + "topic", + "clientId" + ], + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.NATSAuth": { + "description": "NATSAuth refers to the auth info for NATS EventSource", + "properties": { + "basic": { + "$ref": "#/definitions/io.argoproj.common.BasicAuth", + "description": "Baisc auth with username and password" + }, + "credential": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "credential used to connect" + }, + "nkey": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "NKey used to connect" + }, + "token": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "Token used to connect" + } + }, + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.NATSEventsSource": { + "description": "NATSEventsSource refers to event-source for NATS related events", + "properties": { + "auth": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.NATSAuth", + "description": "Auth information" + }, + "connectionBackoff": { + "$ref": "#/definitions/io.argoproj.common.Backoff", + "description": "ConnectionBackoff holds backoff applied to connection." + }, + "filter": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter", + "description": "Filter" + }, + "jsonBody": { + "description": "JSONBody specifies that all event body payload coming from this source will be JSON", + "type": "boolean" + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata holds the user defined metadata which will passed along the event payload.", + "type": "object" + }, + "queue": { + "description": "Queue is the name of the queue group to subscribe as if specified. Uses QueueSubscribe logic to subscribe as queue group. If the queue is empty, uses default Subscribe logic.", + "type": "string" + }, + "subject": { + "description": "Subject holds the name of the subject onto which messages are published", + "type": "string" + }, + "tls": { + "$ref": "#/definitions/io.argoproj.common.TLSConfig", + "description": "TLS configuration for the nats client." + }, + "url": { + "description": "URL to connect to NATS cluster", + "type": "string" + } + }, + "required": [ + "url", + "subject" + ], + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.NSQEventSource": { + "description": "NSQEventSource describes the event source for NSQ PubSub More info at https://godoc.org/github.com/nsqio/go-nsq", + "properties": { + "channel": { + "description": "Channel used for subscription", + "type": "string" + }, + "connectionBackoff": { + "$ref": "#/definitions/io.argoproj.common.Backoff", + "description": "Backoff holds parameters applied to connection." 
+ }, + "filter": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter", + "description": "Filter" + }, + "hostAddress": { + "description": "HostAddress is the address of the host for NSQ lookup", + "type": "string" + }, + "jsonBody": { + "description": "JSONBody specifies that all event body payload coming from this source will be JSON", + "type": "boolean" + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata holds the user defined metadata which will passed along the event payload.", + "type": "object" + }, + "tls": { + "$ref": "#/definitions/io.argoproj.common.TLSConfig", + "description": "TLS configuration for the nsq client." + }, + "topic": { + "description": "Topic to subscribe to.", + "type": "string" + } + }, + "required": [ + "hostAddress", + "topic", + "channel" + ], + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.OwnedRepositories": { + "properties": { + "names": { + "description": "Repository names", + "items": { + "type": "string" + }, + "type": "array" + }, + "owner": { + "description": "Organization or user name", + "type": "string" + } + }, + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.PayloadEnrichmentFlags": { + "properties": { + "fetchPROnPRCommentAdded": { + "description": "FetchPROnPRCommentAdded determines whether to enrich the payload provided by GitHub on \"pull request comment added\" events, with the full pull request info", + "type": "boolean" + } + }, + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.PubSubEventSource": { + "description": "PubSubEventSource refers to event-source for GCP PubSub related events.", + "properties": { + "credentialSecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "CredentialSecret references to the secret that contains JSON credentials to access GCP. If it is missing, it implicitly uses Workload Identity to access. https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity" + }, + "deleteSubscriptionOnFinish": { + "description": "DeleteSubscriptionOnFinish determines whether to delete the GCP PubSub subscription once the event source is stopped.", + "type": "boolean" + }, + "filter": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter", + "description": "Filter" + }, + "jsonBody": { + "description": "JSONBody specifies that all event body payload coming from this source will be JSON", + "type": "boolean" + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata holds the user defined metadata which will passed along the event payload.", + "type": "object" + }, + "projectID": { + "description": "ProjectID is GCP project ID for the subscription. Required if you run Argo Events outside of GKE/GCE. (otherwise, the default value is its project)", + "type": "string" + }, + "subscriptionID": { + "description": "SubscriptionID is ID of subscription. Required if you use existing subscription. The default value will be auto generated hash based on this eventsource setting, so the subscription might be recreated every time you update the setting, which has a possibility of event loss.", + "type": "string" + }, + "topic": { + "description": "Topic to which the subscription should belongs. Required if you want the eventsource to create a new subscription. 
If you specify this field along with an existing subscription, it will be verified whether it actually belongs to the specified topic.", + "type": "string" + }, + "topicProjectID": { + "description": "TopicProjectID is the GCP project ID for the topic. By default, it is the same as ProjectID.", + "type": "string" + } + }, + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.PulsarEventSource": { + "description": "PulsarEventSource describes the event source for Apache Pulsar", + "properties": { + "authAthenzParams": { + "additionalProperties": { + "type": "string" + }, + "description": "Authentication athenz parameters for the pulsar client. Refer https://github.com/apache/pulsar-client-go/blob/master/pulsar/auth/athenz.go Either token or athenz can be set to use auth.", + "type": "object" + }, + "authAthenzSecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "Authentication athenz privateKey secret for the pulsar client. AuthAthenzSecret must be set if AuthAthenzParams is used." + }, + "authTokenSecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "Authentication token for the pulsar client. Either token or athenz can be set to use auth." + }, + "connectionBackoff": { + "$ref": "#/definitions/io.argoproj.common.Backoff", + "description": "Backoff holds parameters applied to connection." + }, + "filter": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter", + "description": "Filter" + }, + "jsonBody": { + "description": "JSONBody specifies that all event body payload coming from this source will be JSON", + "type": "boolean" + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata holds the user defined metadata which will be passed along the event payload.", + "type": "object" + }, + "tls": { + "$ref": "#/definitions/io.argoproj.common.TLSConfig", + "description": "TLS configuration for the pulsar client." + }, + "tlsAllowInsecureConnection": { + "description": "Whether the Pulsar client accepts untrusted TLS certificates from the broker.", + "type": "boolean" + }, + "tlsTrustCertsSecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "Trusted TLS certificate secret." + }, + "tlsValidateHostname": { + "description": "Whether the Pulsar client verifies the validity of the host name from the broker.", + "type": "boolean" + }, + "topics": { + "description": "Name of the topics to subscribe to.", + "items": { + "type": "string" + }, + "type": "array" + }, + "type": { + "description": "Type of the subscription. Only \"exclusive\" and \"shared\" are supported. Defaults to exclusive.", + "type": "string" + }, + "url": { + "description": "Configure the service URL for the Pulsar service.", + "type": "string" + } + }, + "required": [ + "topics", + "url" + ], + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.RedisEventSource": { + "description": "RedisEventSource describes an event source for the Redis PubSub. More info at https://godoc.org/github.com/go-redis/redis#example-PubSub", + "properties": { + "channels": { + "items": { + "type": "string" + }, + "type": "array" + }, + "db": { + "description": "DB to use. 
If not specified, default DB 0 will be used.", + "format": "int32", + "type": "integer" + }, + "filter": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter", + "description": "Filter" + }, + "hostAddress": { + "description": "HostAddress refers to the address of the Redis host/server", + "type": "string" + }, + "jsonBody": { + "description": "JSONBody specifies that all event body payload coming from this source will be JSON", + "type": "boolean" + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata holds the user-defined metadata which will be passed along with the event payload.", + "type": "object" + }, + "namespace": { + "description": "Namespace to use to retrieve the password from. It should only be specified if the password is declared.", + "type": "string" + }, + "password": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "Password required for authentication if any." + }, + "tls": { + "$ref": "#/definitions/io.argoproj.common.TLSConfig", + "description": "TLS configuration for the redis client." + }, + "username": { + "description": "Username required for ACL style authentication if any.", + "type": "string" + } + }, + "required": [ + "hostAddress", + "channels" + ], + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.RedisStreamEventSource": { + "description": "RedisStreamEventSource describes an event source for Redis streams (https://redis.io/topics/streams-intro)", + "properties": { + "consumerGroup": { + "description": "ConsumerGroup refers to the Redis stream consumer group that will be created on all redis streams. Messages are read through this group. Defaults to 'argo-events-cg'", + "type": "string" + }, + "db": { + "description": "DB to use. If not specified, default DB 0 will be used.", + "format": "int32", + "type": "integer" + }, + "filter": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter", + "description": "Filter" + }, + "hostAddress": { + "description": "HostAddress refers to the address of the Redis host/server (master instance)", + "type": "string" + }, + "maxMsgCountPerRead": { + "description": "MaxMsgCountPerRead holds the maximum number of messages per stream that will be read in each XREADGROUP of all streams. Example: if there are 2 streams and MaxMsgCountPerRead=10, then each XREADGROUP may read up to a total of 20 messages. Same as the COUNT option in XREADGROUP (https://redis.io/topics/streams-intro). Defaults to 10.", + "format": "int32", + "type": "integer" + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata holds the user-defined metadata which will be passed along with the event payload.", + "type": "object" + }, + "password": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "Password required for authentication if any." + }, + "streams": { + "description": "Streams to look for entries. XREADGROUP is used on all streams using a single consumer group.", + "items": { + "type": "string" + }, + "type": "array" + }, + "tls": { + "$ref": "#/definitions/io.argoproj.common.TLSConfig", + "description": "TLS configuration for the redis client." 
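As a quick orientation for the RedisEventSource definition above, a minimal EventSource manifest satisfying its required fields (hostAddress, channels) might look like the sketch below; the host address, channel names, and secret name are hypothetical:

apiVersion: argoproj.io/v1alpha1
kind: EventSource
metadata:
  name: redis-example
spec:
  redis:
    example:
      hostAddress: redis.argo-events.svc:6379   # hypothetical Redis host
      db: 0                                     # optional; DB 0 is the default
      channels:
        - FOO
        - BAR
      password:                                 # optional secret selector
        name: redis-password                    # hypothetical secret
        key: password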
+ }, + "username": { + "description": "Username required for ACL style authentication if any.", + "type": "string" + } + }, + "required": [ + "hostAddress", + "streams" + ], + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.ResourceEventSource": { + "description": "ResourceEventSource refers to an event-source for K8s resource related events.", + "properties": { + "cluster": { + "description": "Cluster from which events will be listened to", + "type": "string" + }, + "eventTypes": { + "description": "EventTypes is the list of event types to watch. Possible values are - ADD, UPDATE and DELETE.", + "items": { + "type": "string" + }, + "type": "array" + }, + "filter": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.ResourceFilter", + "description": "Filter is applied on the metadata of the resource. If you apply a filter, then the internal event informer will only monitor objects that pass the filter." + }, + "group": { + "type": "string" + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata holds the user-defined metadata which will be passed along with the event payload.", + "type": "object" + }, + "namespace": { + "description": "Namespace where resource is deployed", + "type": "string" + }, + "resource": { + "type": "string" + }, + "version": { + "type": "string" + } + }, + "required": [ + "namespace", + "group", + "version", + "resource", + "eventTypes", + "cluster" + ], + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.ResourceFilter": { + "description": "ResourceFilter contains K8s ObjectMeta information to further filter resource event objects", + "properties": { + "afterStart": { + "description": "If the resource is created after the start time then the event is treated as valid.", + "type": "boolean" + }, + "createdBy": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "If the resource is created before the specified time then the event is treated as valid." + }, + "fields": { + "description": "Fields provide field filters similar to K8s field selector (see https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors/). Unlike K8s field selector, it supports arbitrary fields like \"spec.serviceAccountName\", and the value could be a string or a regex. Same as K8s field selector, operators \"=\", \"==\" and \"!=\" are supported.", + "items": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.Selector" + }, + "type": "array" + }, + "labels": { + "description": "Labels provide listing options to K8s API to watch resource/s. Refer https://kubernetes.io/docs/concepts/overview/working-with-objects/label-selectors/ for more info. Unlike K8s field selector, multiple values are passed as comma separated values instead of a list of values. Eg: value: value1,value2. Same as K8s label selector, operators \"=\", \"==\", \"!=\", \"exists\", \"!\", \"notin\", \"in\", \"gt\" and \"lt\" are supported", + "items": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.Selector" + }, + "type": "array" + }, + "prefix": { + "description": "Prefix filter is applied on the resource name.", + "type": "string" + } + }, + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.SFTPEventSource": { + "description": "SFTPEventSource describes an event-source for sftp related events.", + "properties": { + "address": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "Address is the sftp address." 
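To make the ResourceEventSource and ResourceFilter definitions concrete, here is a hedged sketch of an EventSource watching Pods; all names are made up, and the cluster field is included only because this schema revision marks it required:

apiVersion: argoproj.io/v1alpha1
kind: EventSource
metadata:
  name: resource-example
spec:
  resource:
    pod-changes:
      namespace: argo-events
      group: ""                  # core API group
      version: v1
      resource: pods
      cluster: local             # hypothetical; listed as required above
      eventTypes:
        - ADD
        - DELETE
      filter:
        prefix: my-app-          # only resource names with this prefix
        labels:
          - key: app
            operation: "=="
            value: my-app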
+ }, + "eventType": { + "description": "Type of file operations to watch. Refer https://github.com/fsnotify/fsnotify/blob/master/fsnotify.go for more information", + "type": "string" + }, + "filter": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter", + "description": "Filter" + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata holds the user-defined metadata which will be passed along with the event payload.", + "type": "object" + }, + "password": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "Password required for authentication if any." + }, + "pollIntervalDuration": { + "description": "PollIntervalDuration is the interval at which to poll the SFTP server. Defaults to 10 seconds.", + "type": "string" + }, + "sshKeySecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "SSHKeySecret refers to the secret that contains the SSH key. The key needs to contain the private key and the public key." + }, + "username": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "Username required for authentication if any." + }, + "watchPathConfig": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.WatchPathConfig", + "description": "WatchPathConfig contains configuration about the file path to watch" + } + }, + "required": [ + "eventType", + "watchPathConfig" + ], + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.SNSEventSource": { + "description": "SNSEventSource refers to event-source for AWS SNS related events", + "properties": { + "accessKey": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "AccessKey refers to a K8s secret containing the AWS access key" + }, + "endpoint": { + "description": "Endpoint configures connection to a specific SNS endpoint instead of Amazon's servers", + "type": "string" + }, + "filter": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter", + "description": "Filter" + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata holds the user-defined metadata which will be passed along with the event payload.", + "type": "object" + }, + "region": { + "description": "Region is the AWS region", + "type": "string" + }, + "roleARN": { + "description": "RoleARN is the Amazon Resource Name (ARN) of the role to assume.", + "type": "string" + }, + "secretKey": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "SecretKey refers to a K8s secret containing the AWS secret key" + }, + "topicArn": { + "description": "TopicArn", + "type": "string" + }, + "validateSignature": { + "description": "ValidateSignature is a boolean that can be set to true to enable SNS signature verification", + "type": "boolean" + }, + "webhook": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.WebhookContext", + "description": "Webhook configuration for http server" + } + }, + "required": [ + "topicArn", + "region" + ], + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.SQSEventSource": { + "description": "SQSEventSource refers to event-source for AWS SQS related events", + "properties": { + "accessKey": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "AccessKey refers to a K8s secret containing the AWS access key" + }, + "dlq": { + "description": "DLQ specifies if a dead-letter queue is configured for messages that can't be processed successfully. 
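The SNSEventSource above needs only topicArn and region; a hedged sketch with a hypothetical ARN, secret, and webhook block (the webhook fields follow the WebhookContext definition later in this schema):

apiVersion: argoproj.io/v1alpha1
kind: EventSource
metadata:
  name: aws-sns-example
spec:
  sns:
    example:
      topicArn: arn:aws:sns:us-east-1:123456789012:my-topic   # hypothetical
      region: us-east-1
      validateSignature: true
      webhook:
        endpoint: /sns
        method: POST
        port: "12000"
        url: https://sns-gateway.example.com   # hypothetical public URL
      accessKey:
        name: aws-secret                       # hypothetical secret
        key: accesskey
      secretKey:
        name: aws-secret
        key: secretkey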
If set to true, messages with an invalid payload won't be acknowledged, so that they can be forwarded on to the dead-letter queue. The default value is false.", + "type": "boolean" + }, + "endpoint": { + "description": "Endpoint configures connection to a specific SQS endpoint instead of Amazon's servers", + "type": "string" + }, + "filter": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter", + "description": "Filter" + }, + "jsonBody": { + "description": "JSONBody specifies that all event body payload coming from this source will be JSON", + "type": "boolean" + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata holds the user-defined metadata which will be passed along with the event payload.", + "type": "object" + }, + "queue": { + "description": "Queue is the AWS SQS queue to listen to for messages", + "type": "string" + }, + "queueAccountId": { + "description": "QueueAccountID is the ID of the account that created the queue to monitor", + "type": "string" + }, + "region": { + "description": "Region is the AWS region", + "type": "string" + }, + "roleARN": { + "description": "RoleARN is the Amazon Resource Name (ARN) of the role to assume.", + "type": "string" + }, + "secretKey": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "SecretKey refers to a K8s secret containing the AWS secret key" + }, + "sessionToken": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "SessionToken refers to a K8s secret containing the AWS temporary credentials (STS) session token" + }, + "waitTimeSeconds": { + "description": "WaitTimeSeconds is the duration (in seconds) for which the call waits for a message to arrive in the queue before returning.", + "format": "int64", + "type": "integer" + } + }, + "required": [ + "region", + "queue", + "waitTimeSeconds" + ], + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.Selector": { + "description": "Selector represents a conditional operation to select K8s objects.", + "properties": { + "key": { + "description": "Key name", + "type": "string" + }, + "operation": { + "description": "Supported operations like ==, != etc. Defaults to ==. Refer https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors for more info.", + "type": "string" + }, + "value": { + "description": "Value", + "type": "string" + } + }, + "required": [ + "key", + "value" + ], + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.Service": { + "description": "Service holds the service information the eventsource exposes", + "properties": { + "clusterIP": { + "description": "clusterIP is the IP address of the service and is usually assigned randomly by the master. If an address is specified manually and is not in use by others, it will be allocated to the service; otherwise, creation of the service will fail. This field can not be changed through updates. Valid values are \"None\", empty string (\"\"), or a valid IP address. \"None\" can be specified for headless services when proxying is not required. 
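Similarly, a hedged SQSEventSource sketch covering the required region, queue, and waitTimeSeconds fields (queue name and secret are hypothetical):

apiVersion: argoproj.io/v1alpha1
kind: EventSource
metadata:
  name: aws-sqs-example
spec:
  sqs:
    example:
      region: us-east-1
      queue: my-queue           # hypothetical queue
      waitTimeSeconds: 20       # long-poll duration, required by the schema
      jsonBody: true
      dlq: true                 # leave failed messages for the dead-letter queue
      accessKey:
        name: aws-secret        # hypothetical secret
        key: accesskey
      secretKey:
        name: aws-secret
        key: secretkey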
More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "type": "string" + }, + "ports": { + "description": "The list of ports that are exposed by this ClusterIP service.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ServicePort" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "port", + "protocol" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "port", + "x-kubernetes-patch-strategy": "merge" + } + }, + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.SlackEventSource": { + "description": "SlackEventSource refers to event-source for Slack related events", + "properties": { + "filter": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter", + "description": "Filter" + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata holds the user defined metadata which will passed along the event payload.", + "type": "object" + }, + "signingSecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "Slack App signing secret" + }, + "token": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "Token for URL verification handshake" + }, + "webhook": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.WebhookContext", + "description": "Webhook holds configuration for a REST endpoint" + } + }, + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.StorageGridEventSource": { + "description": "StorageGridEventSource refers to event-source for StorageGrid related events", + "properties": { + "apiURL": { + "description": "APIURL is the url of the storagegrid api.", + "type": "string" + }, + "authToken": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "Auth token for storagegrid api" + }, + "bucket": { + "description": "Name of the bucket to register notifications for.", + "type": "string" + }, + "events": { + "items": { + "type": "string" + }, + "type": "array" + }, + "filter": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.StorageGridFilter", + "description": "Filter on object key which caused the notification." + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata holds the user defined metadata which will passed along the event payload.", + "type": "object" + }, + "region": { + "description": "S3 region. 
Defaults to us-east-1", + "type": "string" + }, + "topicArn": { + "description": "TopicArn", + "type": "string" + }, + "webhook": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.WebhookContext", + "description": "Webhook holds configuration for a REST endpoint" + } + }, + "required": [ + "topicArn", + "bucket", + "authToken", + "apiURL" + ], + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.StorageGridFilter": { + "description": "StorageGridFilter represents filters to apply to bucket notifications for specifying constraints on objects", + "properties": { + "prefix": { + "type": "string" + }, + "suffix": { + "type": "string" + } + }, + "required": [ + "prefix", + "suffix" + ], + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.StripeEventSource": { + "description": "StripeEventSource describes the event source for stripe webhook notifications More info at https://stripe.com/docs/webhooks", + "properties": { + "apiKey": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "APIKey refers to K8s secret that holds Stripe API key. Used only if CreateWebhook is enabled." + }, + "createWebhook": { + "description": "CreateWebhook if specified creates a new webhook programmatically.", + "type": "boolean" + }, + "eventFilter": { + "description": "EventFilter describes the type of events to listen to. If not specified, all types of events will be processed. More info at https://stripe.com/docs/api/events/list", + "items": { + "type": "string" + }, + "type": "array" + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata holds the user defined metadata which will passed along the event payload.", + "type": "object" + }, + "webhook": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.WebhookContext", + "description": "Webhook holds configuration for a REST endpoint" + } + }, + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.Template": { + "description": "Template holds the information of an EventSource deployment template", + "properties": { + "affinity": { + "$ref": "#/definitions/io.k8s.api.core.v1.Affinity", + "description": "If specified, the pod's scheduling constraints" + }, + "container": { + "$ref": "#/definitions/io.k8s.api.core.v1.Container", + "description": "Container is the main container image to run in the event source pod" + }, + "imagePullSecrets": { + "description": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "metadata": { + "$ref": "#/definitions/io.argoproj.common.Metadata", + "description": "Metadata sets the pods's metadata, i.e. annotations and labels" + }, + "nodeSelector": { + "additionalProperties": { + "type": "string" + }, + "description": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. 
More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", + "type": "object" + }, + "priority": { + "description": "The priority value. Various system components use this field to find the priority of the EventSource pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/", + "format": "int32", + "type": "integer" + }, + "priorityClassName": { + "description": "If specified, indicates the EventSource pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/", + "type": "string" + }, + "securityContext": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodSecurityContext", + "description": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field." + }, + "serviceAccountName": { + "description": "ServiceAccountName is the name of the ServiceAccount to use to run event source pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", + "type": "string" + }, + "tolerations": { + "description": "If specified, the pod's tolerations.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Toleration" + }, + "type": "array" + }, + "volumes": { + "description": "Volumes is a list of volumes that can be mounted by containers in an eventsource.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Volume" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + } + }, + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.WatchPathConfig": { + "properties": { + "directory": { + "description": "Directory to watch for events", + "type": "string" + }, + "path": { + "description": "Path is relative path of object to watch with respect to the directory", + "type": "string" + }, + "pathRegexp": { + "description": "PathRegexp is regexp of relative path of object to watch with respect to the directory", + "type": "string" + } + }, + "required": [ + "directory" + ], + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.WebhookContext": { + "description": "WebhookContext holds a general purpose REST API context", + "properties": { + "authSecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "AuthSecret holds a secret selector that contains a bearer token for authentication" + }, + "endpoint": { + "description": "REST API endpoint", + "type": "string" + }, + "maxPayloadSize": { + "description": "MaxPayloadSize is the maximum webhook payload size that the server will accept. Requests exceeding that limit will be rejected with \"request too large\" response. 
Default value: 1048576 (1MB).", + "format": "int64", + "type": "integer" + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata holds the user-defined metadata which will be passed along with the event payload.", + "type": "object" + }, + "method": { + "description": "Method is the HTTP request method that indicates the desired action to be performed for a given resource. See RFC7231 Hypertext Transfer Protocol (HTTP/1.1): Semantics and Content", + "type": "string" + }, + "port": { + "description": "Port on which HTTP server is listening for incoming events.", + "type": "string" + }, + "serverCertSecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "ServerCertSecret refers to the secret that contains the cert." + }, + "serverKeySecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "ServerKeySecret refers to the secret that contains the private key" + }, + "url": { + "description": "URL is the url of the server.", + "type": "string" + } + }, + "required": [ + "endpoint", + "method", + "port", + "url" + ], + "type": "object" + }, + "io.argoproj.eventsource.v1alpha1.WebhookEventSource": { + "description": "WebhookEventSource describes an HTTP based EventSource", + "properties": { + "authSecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "AuthSecret holds a secret selector that contains a bearer token for authentication" + }, + "endpoint": { + "description": "REST API endpoint", + "type": "string" + }, + "filter": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter", + "description": "Filter" + }, + "maxPayloadSize": { + "description": "MaxPayloadSize is the maximum webhook payload size that the server will accept. Requests exceeding that limit will be rejected with \"request too large\" response. Default value: 1048576 (1MB).", + "format": "int64", + "type": "integer" + }, + "metadata": { + "additionalProperties": { + "type": "string" + }, + "description": "Metadata holds the user-defined metadata which will be passed along with the event payload.", + "type": "object" + }, + "method": { + "description": "Method is the HTTP request method that indicates the desired action to be performed for a given resource. See RFC7231 Hypertext Transfer Protocol (HTTP/1.1): Semantics and Content", + "type": "string" + }, + "port": { + "description": "Port on which HTTP server is listening for incoming events.", + "type": "string" + }, + "serverCertSecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "ServerCertSecret refers to the secret that contains the cert." 
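Since WebhookContext requires endpoint, method, port, and url, a minimal webhook EventSource sketch might read as follows (the URL and secret name are hypothetical):

apiVersion: argoproj.io/v1alpha1
kind: EventSource
metadata:
  name: webhook-example
spec:
  webhook:
    example:
      endpoint: /example
      method: POST
      port: "12000"
      url: https://webhook.example.com   # hypothetical; required by this schema
      maxPayloadSize: 1048576            # optional; 1MB is the default anyway
      authSecret:
        name: webhook-token              # hypothetical bearer-token secret
        key: token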
+ }, + "serverKeySecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "ServerKeySecret refers to the secret that contains the private key" + }, + "url": { + "description": "URL is the url of the server.", + "type": "string" + } + }, + "required": [ + "endpoint", + "method", + "port", + "url" + ], + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.AWSLambdaTrigger": { + "description": "AWSLambdaTrigger refers to specification of the trigger to invoke an AWS Lambda function", + "properties": { + "accessKey": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "AccessKey refers to a K8s secret containing the AWS access key" + }, + "functionName": { + "description": "FunctionName refers to the name of the function to invoke.", + "type": "string" + }, + "invocationType": { + "description": "Choose from the following options.\n\n * RequestResponse (default) - Invoke the function synchronously. Keep\n the connection open until the function returns a response or times out.\n The API response includes the function response and additional data.\n\n * Event - Invoke the function asynchronously. Send events that fail multiple\n times to the function's dead-letter queue (if it's configured). The API\n response only includes a status code.\n\n * DryRun - Validate parameter values and verify that the user or role\n has permission to invoke the function.", + "type": "string" + }, + "parameters": { + "description": "Parameters is the list of key-value pairs extracted from the event's payload that are applied to the trigger resource.", + "items": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TriggerParameter" + }, + "type": "array" + }, + "payload": { + "description": "Payload is the list of key-value pairs extracted from an event payload to construct the request payload.", + "items": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TriggerParameter" + }, + "type": "array" + }, + "region": { + "description": "Region is the AWS region", + "type": "string" + }, + "roleARN": { + "description": "RoleARN is the Amazon Resource Name (ARN) of the role to assume.", + "type": "string" + }, + "secretKey": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "SecretKey refers to a K8s secret containing the AWS secret key" + } + }, + "required": [ + "functionName", + "region", + "payload" + ], + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.ArgoWorkflowTrigger": { + "description": "ArgoWorkflowTrigger is the trigger for the Argo Workflow", + "properties": { + "args": { + "description": "Args is the list of arguments to pass to the argo CLI", + "items": { + "type": "string" + }, + "type": "array" + }, + "operation": { + "description": "Operation refers to the type of operation performed on the argo workflow resource. 
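A hedged Sensor fragment using the AWSLambdaTrigger above; the function name, region, and upstream dependency are hypothetical, and the payload entries follow the TriggerParameter definition referenced here:

apiVersion: argoproj.io/v1alpha1
kind: Sensor
metadata:
  name: lambda-example
spec:
  dependencies:
    - name: dep
      eventSourceName: webhook-example
      eventName: example
  triggers:
    - template:
        name: invoke-lambda
        awsLambda:
          functionName: hello           # hypothetical function
          region: us-east-1
          invocationType: Event         # asynchronous invocation
          payload:
            - src:
                dependencyName: dep
                dataKey: body
              dest: message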
Default value is Submit.", + "type": "string" + }, + "parameters": { + "description": "Parameters is the list of parameters to pass to resolved Argo Workflow object", + "items": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TriggerParameter" + }, + "type": "array" + }, + "source": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.ArtifactLocation", + "description": "Source of the K8s resource file(s)" + } + }, + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.ArtifactLocation": { + "description": "ArtifactLocation describes the source location for an external artifact", + "properties": { + "configmap": { + "$ref": "#/definitions/io.k8s.api.core.v1.ConfigMapKeySelector", + "description": "Configmap that stores the artifact" + }, + "file": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.FileArtifact", + "description": "File artifact is artifact stored in a file" + }, + "git": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.GitArtifact", + "description": "Git repository hosting the artifact" + }, + "inline": { + "description": "Inline artifact is embedded in sensor spec as a string", + "type": "string" + }, + "resource": { + "$ref": "#/definitions/io.argoproj.common.Resource", + "description": "Resource is generic template for K8s resource" + }, + "s3": { + "$ref": "#/definitions/io.argoproj.common.S3Artifact", + "description": "S3 compliant artifact" + }, + "url": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.URLArtifact", + "description": "URL to fetch the artifact from" + } + }, + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.AzureEventHubsTrigger": { + "description": "AzureEventHubsTrigger refers to specification of the Azure Event Hubs Trigger", + "properties": { + "fqdn": { + "description": "FQDN refers to the namespace dns of Azure Event Hubs to be used i.e. 
\u003cnamespace\u003e.servicebus.windows.net", + "type": "string" + }, + "hubName": { + "description": "HubName refers to the Azure Event Hub to send events to", + "type": "string" + }, + "parameters": { + "description": "Parameters is the list of key-value extracted from event's payload that are applied to the trigger resource.", + "items": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TriggerParameter" + }, + "type": "array" + }, + "payload": { + "description": "Payload is the list of key-value extracted from an event payload to construct the request payload.", + "items": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TriggerParameter" + }, + "type": "array" + }, + "sharedAccessKey": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "SharedAccessKey refers to a K8s secret containing the primary key for the" + }, + "sharedAccessKeyName": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "SharedAccessKeyName refers to the name of the Shared Access Key" + } + }, + "required": [ + "fqdn", + "hubName", + "sharedAccessKeyName", + "payload" + ], + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.AzureServiceBusTrigger": { + "properties": { + "connectionString": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "ConnectionString is the connection string for the Azure Service Bus" + }, + "parameters": { + "description": "Parameters is the list of key-value extracted from event's payload that are applied to the trigger resource.", + "items": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TriggerParameter" + }, + "type": "array" + }, + "payload": { + "description": "Payload is the list of key-value extracted from an event payload to construct the request payload.", + "items": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TriggerParameter" + }, + "type": "array" + }, + "queueName": { + "description": "QueueName is the name of the Azure Service Bus Queue", + "type": "string" + }, + "subscriptionName": { + "description": "SubscriptionName is the name of the Azure Service Bus Topic Subscription", + "type": "string" + }, + "tls": { + "$ref": "#/definitions/io.argoproj.common.TLSConfig", + "description": "TLS configuration for the service bus client" + }, + "topicName": { + "description": "TopicName is the name of the Azure Service Bus Topic", + "type": "string" + } + }, + "required": [ + "queueName", + "topicName", + "subscriptionName", + "payload" + ], + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.ConditionsResetByTime": { + "properties": { + "cron": { + "description": "Cron is a cron-like expression. For reference, see: https://en.wikipedia.org/wiki/Cron", + "type": "string" + }, + "timezone": { + "type": "string" + } + }, + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.ConditionsResetCriteria": { + "properties": { + "byTime": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.ConditionsResetByTime", + "description": "Schedule is a cron-like expression. For reference, see: https://en.wikipedia.org/wiki/Cron" + } + }, + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.CustomTrigger": { + "description": "CustomTrigger refers to the specification of the custom trigger.", + "properties": { + "certSecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "CertSecret refers to the secret that contains cert for secure connection between sensor and custom trigger gRPC server." 
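For the AzureEventHubsTrigger just defined, a trigger fragment (to be placed under a Sensor's spec.triggers) could look like the sketch below; the namespace FQDN, hub, and secret are hypothetical:

- template:
    name: publish-to-event-hub
    azureEventHubs:
      fqdn: my-namespace.servicebus.windows.net   # hypothetical namespace
      hubName: my-hub
      sharedAccessKeyName:
        name: eventhub-auth     # hypothetical secret
        key: name
      sharedAccessKey:
        name: eventhub-auth
        key: key
      payload:
        - src:
            dependencyName: dep
            dataKey: body
          dest: message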
+ }, + "parameters": { + "description": "Parameters is the list of parameters that are applied to the resolved custom trigger object.", + "items": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TriggerParameter" + }, + "type": "array" + }, + "payload": { + "description": "Payload is the list of key-value pairs extracted from an event payload to construct the request payload.", + "items": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TriggerParameter" + }, + "type": "array" + }, + "secure": { + "description": "Secure refers to the type of connection between the sensor and the custom trigger gRPC server", + "type": "boolean" + }, + "serverNameOverride": { + "description": "ServerNameOverride for the secure connection between sensor and custom trigger gRPC server.", + "type": "string" + }, + "serverURL": { + "description": "ServerURL is the url of the gRPC server that executes the custom trigger", + "type": "string" + }, + "spec": { + "additionalProperties": { + "type": "string" + }, + "description": "Spec is the custom trigger resource specification that the custom trigger gRPC server knows how to interpret.", + "type": "object" + } + }, + "required": [ + "serverURL", + "secure", + "spec", + "payload" + ], + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.DataFilter": { + "description": "DataFilter describes constraints and filters for event data. Regular expressions are purposefully not a feature, as they are overkill for our uses here. See Rob Pike's post: https://commandcenter.blogspot.com/2011/08/regular-expressions-in-lexing-and.html", + "properties": { + "comparator": { + "description": "Comparator compares the event data with a user given value. Can be \"\u003e=\", \"\u003e\", \"=\", \"!=\", \"\u003c\", or \"\u003c=\". Is optional, and if left blank treated as equality \"=\".", + "type": "string" + }, + "path": { + "description": "Path is the JSONPath of the event's (JSON decoded) data key. Path is a series of keys separated by a dot. A key may contain wildcard characters '*' and '?'. To access an array value use the index as the key. The dot and wildcard characters can be escaped with '\\'. See https://github.com/tidwall/gjson#path-syntax for more information on how to use this.", + "type": "string" + }, + "template": { + "description": "Template is a go-template for extracting a string from the event's data. A Template is evaluated with provided path, type and value. The templating follows the standard go-template syntax as well as sprig's extra functions. See https://pkg.go.dev/text/template and https://masterminds.github.io/sprig/", + "type": "string" + }, + "type": { + "description": "Type contains the JSON type of the data", + "type": "string" + }, + "value": { + "description": "Value is the allowed string values for this key. Booleans are passed using strconv.ParseBool(), numbers are parsed as float64 using strconv.ParseFloat(), strings are taken as-is, and nils are ignored.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "path", + "type", + "value" + ], + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.EmailTrigger": { + "description": "EmailTrigger refers to the specification of the email notification trigger.", + "properties": { + "body": { + "description": "Body refers to the body/content of the email to be sent.", + "type": "string" + }, + "from": { + "description": "From refers to the address from which the email is sent.", + "type": "string" + }, + "host": { + "description": "Host refers to the SMTP host URL to which the email is sent.", + "type": "string" + }, + "parameters": { + "description": "Parameters is the list of key-value pairs extracted from the event's payload that are applied to the trigger resource.", + "items": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TriggerParameter" + }, + "type": "array" + }, + "port": { + "description": "Port refers to the SMTP server port to which the email is sent. Defaults to 0.", + "format": "int32", + "type": "integer" + }, + "smtpPassword": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "SMTPPassword refers to the Kubernetes secret that holds the SMTP password used to connect to the SMTP server." + }, + "subject": { + "description": "Subject refers to the subject line for the email to be sent.", + "type": "string" + }, + "to": { + "description": "To refers to the email addresses to which the emails are sent.", + "items": { + "type": "string" + }, + "type": "array" + }, + "username": { + "description": "Username refers to the username used to connect to the SMTP server.", + "type": "string" + } + }, + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.Event": { + "description": "Event represents the cloudevent received from an event source.", + "properties": { + "context": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.EventContext" + }, + "data": { + "format": "byte", + "type": "string" + } + }, + "required": [ + "data" + ], + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.EventContext": { + "description": "EventContext holds the context of the cloudevent received from an event source.", + "properties": { + "datacontenttype": { + "description": "DataContentType - A MIME (RFC2046) string describing the media type of `data`.", + "type": "string" + }, + "id": { + "description": "ID of the event; must be non-empty and unique within the scope of the producer.", + "type": "string" + }, + "source": { + "description": "Source - A URI describing the event producer.", + "type": "string" + }, + "specversion": { + "description": "SpecVersion - The version of the CloudEvents specification used by the event.", + "type": "string" + }, + "subject": { + "description": "Subject - The subject of the event in the context of the event producer", + "type": "string" + }, + "time": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "Time - A timestamp of when the event happened." 
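To illustrate how the DataFilter above composes inside an EventDependencyFilter (defined just below), here is a hedged dependency fragment; the gjson paths and values are hypothetical:

dependencies:
  - name: dep
    eventSourceName: webhook-example
    eventName: example
    filters:
      dataLogicalOperator: and    # both filters must pass
      data:
        - path: body.action       # gjson path into the JSON payload
          type: string
          value:
            - opened
            - reopened
        - path: body.count
          type: number
          comparator: ">="        # defaults to equality when omitted
          value:
            - "5"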
+ }, + "type": { + "description": "Type - The type of the occurrence which has happened.", + "type": "string" + } + }, + "required": [ + "id", + "source", + "specversion", + "type", + "datacontenttype", + "subject", + "time" + ], + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.EventDependency": { + "description": "EventDependency describes a dependency", + "properties": { + "eventName": { + "description": "EventName is the name of the event", + "type": "string" + }, + "eventSourceName": { + "description": "EventSourceName is the name of EventSource that Sensor depends on", + "type": "string" + }, + "filters": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.EventDependencyFilter", + "description": "Filters and rules governing toleration of success and constraints on the context and data of an event" + }, + "filtersLogicalOperator": { + "description": "FiltersLogicalOperator defines how different filters are evaluated together. Available values: and (\u0026\u0026), or (||) Is optional and if left blank treated as and (\u0026\u0026).", + "type": "string" + }, + "name": { + "description": "Name is a unique name of this dependency", + "type": "string" + }, + "transform": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.EventDependencyTransformer", + "description": "Transform transforms the event data" + } + }, + "required": [ + "name", + "eventSourceName", + "eventName" + ], + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.EventDependencyFilter": { + "description": "EventDependencyFilter defines filters and constraints for a event.", + "properties": { + "context": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.EventContext", + "description": "Context filter constraints" + }, + "data": { + "description": "Data filter constraints with escalation", + "items": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.DataFilter" + }, + "type": "array" + }, + "dataLogicalOperator": { + "description": "DataLogicalOperator defines how multiple Data filters (if defined) are evaluated together. Available values: and (\u0026\u0026), or (||) Is optional and if left blank treated as and (\u0026\u0026).", + "type": "string" + }, + "exprLogicalOperator": { + "description": "ExprLogicalOperator defines how multiple Exprs filters (if defined) are evaluated together. 
Available values: and (\u0026\u0026), or (||) Is optional and if left blank treated as and (\u0026\u0026).", + "type": "string" + }, + "exprs": { + "description": "Exprs contains the list of expressions evaluated against the event payload.", + "items": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.ExprFilter" + }, + "type": "array" + }, + "script": { + "description": "Script refers to a Lua script evaluated to determine the validity of an event.", + "type": "string" + }, + "time": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TimeFilter", + "description": "Time filter on the event with escalation" + } + }, + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.EventDependencyTransformer": { + "description": "EventDependencyTransformer transforms the event", + "properties": { + "jq": { + "description": "JQ holds the jq command applied for transformation", + "type": "string" + }, + "script": { + "description": "Script refers to a Lua script used to transform the event", + "type": "string" + } + }, + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.ExprFilter": { + "properties": { + "expr": { + "description": "Expr refers to the expression that determines the outcome of the filter.", + "type": "string" + }, + "fields": { + "description": "Fields refers to set of keys that refer to the paths within event payload.", + "items": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.PayloadField" + }, + "type": "array" + } + }, + "required": [ + "expr", + "fields" + ], + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.FileArtifact": { + "description": "FileArtifact contains information about an artifact in a filesystem", + "properties": { + "path": { + "type": "string" + } + }, + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.GitArtifact": { + "description": "GitArtifact contains information about an artifact stored in git", + "properties": { + "branch": { + "description": "Branch to use to pull trigger resource", + "type": "string" + }, + "cloneDirectory": { + "description": "Directory to clone the repository. We clone complete directory because GitArtifact is not limited to any specific Git service providers. Hence we don't use any specific git provider client.", + "type": "string" + }, + "creds": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.GitCreds", + "description": "Creds contain reference to git username and password" + }, + "filePath": { + "description": "Path to file that contains trigger resource definition", + "type": "string" + }, + "insecureIgnoreHostKey": { + "description": "Whether to ignore host key", + "type": "boolean" + }, + "ref": { + "description": "Ref to use to pull trigger resource. Will result in a shallow clone and fetch.", + "type": "string" + }, + "remote": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.GitRemoteConfig", + "description": "Remote to manage set of tracked repositories. Defaults to \"origin\". 
Refer https://git-scm.com/docs/git-remote" + }, + "sshKeySecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "SSHKeySecret refers to the secret that contains SSH key" + }, + "tag": { + "description": "Tag to use to pull trigger resource", + "type": "string" + }, + "url": { + "description": "Git URL", + "type": "string" + } + }, + "required": [ + "url", + "cloneDirectory", + "filePath" + ], + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.GitCreds": { + "description": "GitCreds contain reference to git username and password", + "properties": { + "password": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + }, + "username": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + } + }, + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.GitRemoteConfig": { + "description": "GitRemoteConfig contains the configuration of a Git remote", + "properties": { + "name": { + "description": "Name of the remote to fetch from.", + "type": "string" + }, + "urls": { + "description": "URLs the URLs of a remote repository. It must be non-empty. Fetch will always use the first URL, while push will use all of them.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "name", + "urls" + ], + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.HTTPTrigger": { + "description": "HTTPTrigger is the trigger for the HTTP request", + "properties": { + "basicAuth": { + "$ref": "#/definitions/io.argoproj.common.BasicAuth", + "description": "BasicAuth configuration for the http request." + }, + "headers": { + "additionalProperties": { + "type": "string" + }, + "description": "Headers for the HTTP request.", + "type": "object" + }, + "method": { + "description": "Method refers to the type of the HTTP request. Refer https://golang.org/src/net/http/method.go for more info. Default value is POST.", + "type": "string" + }, + "parameters": { + "description": "Parameters is the list of key-value extracted from event's payload that are applied to the HTTP trigger resource.", + "items": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TriggerParameter" + }, + "type": "array" + }, + "payload": { + "items": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TriggerParameter" + }, + "type": "array" + }, + "secureHeaders": { + "description": "Secure Headers stored in Kubernetes Secrets for the HTTP requests.", + "items": { + "$ref": "#/definitions/io.argoproj.common.SecureHeader" + }, + "type": "array" + }, + "timeout": { + "description": "Timeout refers to the HTTP request timeout in seconds. Default value is 60 seconds.", + "format": "int64", + "type": "integer" + }, + "tls": { + "$ref": "#/definitions/io.argoproj.common.TLSConfig", + "description": "TLS configuration for the HTTP client." 
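The HTTPTrigger above requires only url and payload; a hedged trigger fragment (placed under a Sensor's spec.triggers) with a hypothetical endpoint:

- template:
    name: http-trigger
    http:
      url: https://api.example.com/hook   # hypothetical endpoint
      method: POST                        # POST is the default
      timeout: 30                         # seconds; the schema default is 60
      headers:
        Content-Type: application/json
      payload:
        - src:
            dependencyName: dep
            dataKey: body
          dest: message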
+ }, + "url": { + "description": "URL refers to the URL to send HTTP request to.", + "type": "string" + } + }, + "required": [ + "url", + "payload" + ], + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.K8SResourcePolicy": { + "description": "K8SResourcePolicy refers to the policy used to check the state of K8s based triggers using labels", + "properties": { + "backoff": { + "$ref": "#/definitions/io.argoproj.common.Backoff", + "description": "Backoff before checking resource state" + }, + "errorOnBackoffTimeout": { + "description": "ErrorOnBackoffTimeout determines whether sensor should transition to error state if the trigger policy is unable to determine the state of the resource", + "type": "boolean" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Labels required to identify whether a resource is in success state", + "type": "object" + } + }, + "required": [ + "backoff", + "errorOnBackoffTimeout" + ], + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.KafkaTrigger": { + "description": "KafkaTrigger refers to the specification of the Kafka trigger.", + "properties": { + "compress": { + "description": "Compress determines whether to compress message or not. Defaults to false. If set to true, compresses message using snappy compression.", + "type": "boolean" + }, + "flushFrequency": { + "description": "FlushFrequency refers to the frequency in milliseconds to flush batches. Defaults to 500 milliseconds.", + "format": "int32", + "type": "integer" + }, + "parameters": { + "description": "Parameters is the list of parameters that is applied to resolved Kafka trigger object.", + "items": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TriggerParameter" + }, + "type": "array" + }, + "partition": { + "description": "DEPRECATED", + "format": "int32", + "type": "integer" + }, + "partitioningKey": { + "description": "The partitioning key for the messages put on the Kafka topic.", + "type": "string" + }, + "payload": { + "description": "Payload is the list of key-value extracted from an event payload to construct the request payload.", + "items": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TriggerParameter" + }, + "type": "array" + }, + "requiredAcks": { + "description": "RequiredAcks used in producer to tell the broker how many replica acknowledgements Defaults to 1 (Only wait for the leader to ack).", + "format": "int32", + "type": "integer" + }, + "sasl": { + "$ref": "#/definitions/io.argoproj.common.SASLConfig", + "description": "SASL configuration for the kafka client" + }, + "schemaRegistry": { + "$ref": "#/definitions/io.argoproj.common.SchemaRegistryConfig", + "description": "Schema Registry configuration to producer message with avro format" + }, + "tls": { + "$ref": "#/definitions/io.argoproj.common.TLSConfig", + "description": "TLS configuration for the Kafka producer." + }, + "topic": { + "description": "Name of the topic. More info at https://kafka.apache.org/documentation/#intro_topics", + "type": "string" + }, + "url": { + "description": "URL of the Kafka broker, multiple URLs separated by comma.", + "type": "string" + }, + "version": { + "description": "Specify what kafka version is being connected to enables certain features in sarama, defaults to 1.0.0", + "type": "string" + } + }, + "required": [ + "url", + "topic", + "payload" + ], + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.LogTrigger": { + "properties": { + "intervalSeconds": { + "description": "Only print messages every interval. 
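A hedged fragment for the KafkaTrigger above; the broker URL and topic are hypothetical, and requiredAcks and flushFrequency mirror the defaults documented in the schema:

- template:
    name: kafka-trigger
    kafka:
      url: kafka-broker:9092   # hypothetical; comma-separate multiple brokers
      topic: events
      requiredAcks: 1          # wait for the leader only (the default)
      compress: true           # snappy compression
      flushFrequency: 500      # flush batches every 500 ms
      payload:
        - src:
            dependencyName: dep
            dataKey: body
          dest: message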
Useful to prevent logging too much data for busy events.", + "format": "int64", + "type": "integer" + } + }, + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.NATSTrigger": { + "description": "NATSTrigger refers to the specification of the NATS trigger.", + "properties": { + "parameters": { + "items": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TriggerParameter" + }, + "type": "array" + }, + "payload": { + "items": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TriggerParameter" + }, + "type": "array" + }, + "subject": { + "description": "Name of the subject to put message on.", + "type": "string" + }, + "tls": { + "$ref": "#/definitions/io.argoproj.common.TLSConfig", + "description": "TLS configuration for the NATS producer." + }, + "url": { + "description": "URL of the NATS cluster.", + "type": "string" + } + }, + "required": [ + "url", + "subject", + "payload" + ], + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.OpenWhiskTrigger": { + "description": "OpenWhiskTrigger refers to the specification of the OpenWhisk trigger.", + "properties": { + "actionName": { + "description": "Name of the action/function.", + "type": "string" + }, + "authToken": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "AuthToken for authentication." + }, + "host": { + "description": "Host URL of the OpenWhisk.", + "type": "string" + }, + "namespace": { + "description": "Namespace for the action. Defaults to \"_\".", + "type": "string" + }, + "parameters": { + "description": "Parameters is the list of key-value extracted from event's payload that are applied to the trigger resource.", + "items": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TriggerParameter" + }, + "type": "array" + }, + "payload": { + "description": "Payload is the list of key-value extracted from an event payload to construct the request payload.", + "items": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TriggerParameter" + }, + "type": "array" + }, + "version": { + "description": "Version for the API. Defaults to v1.", + "type": "string" + } + }, + "required": [ + "host", + "actionName", + "payload" + ], + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.PayloadField": { + "description": "PayloadField binds a value at path within the event payload against a name.", + "properties": { + "name": { + "description": "Name acts as key that holds the value at the path.", + "type": "string" + }, + "path": { + "description": "Path is the JSONPath of the event's (JSON decoded) data key Path is a series of keys separated by a dot. A key may contain wildcard characters '*' and '?'. To access an array value use the index as the key. The dot and wildcard characters can be escaped with '\\'. See https://github.com/tidwall/gjson#path-syntax for more information on how to use this.", + "type": "string" + } + }, + "required": [ + "path", + "name" + ], + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.PulsarTrigger": { + "description": "PulsarTrigger refers to the specification of the Pulsar trigger.", + "properties": { + "authAthenzParams": { + "additionalProperties": { + "type": "string" + }, + "description": "Authentication athenz parameters for the pulsar client. 
Refer to https://github.com/apache/pulsar-client-go/blob/master/pulsar/auth/athenz.go for details. Either token or athenz can be set to use auth.", + "type": "object" + }, + "authAthenzSecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "Authentication athenz privateKey secret for the pulsar client. AuthAthenzSecret must be set if AuthAthenzParams is used." + }, + "authTokenSecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "Authentication token for the pulsar client. Either token or athenz can be set to use auth." + }, + "connectionBackoff": { + "$ref": "#/definitions/io.argoproj.common.Backoff", + "description": "Backoff holds parameters applied to connection." + }, + "parameters": { + "description": "Parameters is the list of parameters that are applied to the resolved Pulsar trigger object.", + "items": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TriggerParameter" + }, + "type": "array" + }, + "payload": { + "description": "Payload is the list of key-value pairs extracted from an event payload to construct the request payload.", + "items": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TriggerParameter" + }, + "type": "array" + }, + "tls": { + "$ref": "#/definitions/io.argoproj.common.TLSConfig", + "description": "TLS configuration for the pulsar client." + }, + "tlsAllowInsecureConnection": { + "description": "Whether the Pulsar client accepts untrusted TLS certificates from the broker.", + "type": "boolean" + }, + "tlsTrustCertsSecret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "Trusted TLS certificate secret." + }, + "tlsValidateHostname": { + "description": "Whether the Pulsar client verifies the validity of the host name from the broker.", + "type": "boolean" + }, + "topic": { + "description": "Name of the topic. See https://pulsar.apache.org/docs/en/concepts-messaging/", + "type": "string" + }, + "url": { + "description": "Configure the service URL for the Pulsar service.", + "type": "string" + } + }, + "required": [ + "url", + "topic", + "payload" + ], + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.RateLimit": { + "properties": { + "requestsPerUnit": { + "format": "int32", + "type": "integer" + }, + "unit": { + "description": "Defaults to Second", + "type": "string" + } + }, + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.Sensor": { + "description": "Sensor is the definition of a sensor resource", + "properties": { + "apiVersion": { + "const": "argoproj.io/v1alpha1", + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "const": "Sensor", + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.SensorSpec" + }, + "status": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.SensorStatus" + } + }, + "required": [ + "metadata", + "spec" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "argoproj.io", + "kind": "Sensor", + "version": "v1alpha1" + } + ] + }, + "io.argoproj.sensor.v1alpha1.SensorList": { + "description": "SensorList is the list of Sensor resources", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "items": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.Sensor" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "required": [ + "metadata", + "items" + ], + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.SensorSpec": { + "description": "SensorSpec represents desired sensor state", + "properties": { + "dependencies": { + "description": "Dependencies is a list of the events that this sensor is dependent on.", + "items": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.EventDependency" + }, + "type": "array" + }, + "errorOnFailedRound": { + "description": "ErrorOnFailedRound, if set to true, marks the sensor state as `error` if the previous trigger round fails. Once the sensor state is set to `error`, no further triggers will be processed.", + "type": "boolean" + }, + "eventBusName": { + "description": "EventBusName references an EventBus name. By default the value is \"default\"", + "type": "string" + }, + "loggingFields": { + "additionalProperties": { + "type": "string" + }, + "description": "LoggingFields adds additional key-value pairs when logging happens", + "type": "object" + }, + "replicas": { + "description": "Replicas is the sensor deployment replicas", + "format": "int32", + "type": "integer" + }, + "revisionHistoryLimit": { + "description": "RevisionHistoryLimit specifies how many old deployment revisions to retain", + "format": "int32", + "type": "integer" + }, + "template": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.Template", + "description": "Template is the pod specification for the sensor" + }, + "triggers": { + "description": "Triggers is a list of the things that this sensor evokes.
These are the outputs from this sensor.", + "items": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.Trigger" + }, + "type": "array" + } + }, + "required": [ + "dependencies", + "triggers" + ], + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.SensorStatus": { + "description": "SensorStatus contains information about the status of a sensor.", + "properties": { + "conditions": { + "description": "Conditions are the latest available observations of a resource's current state.", + "items": { + "$ref": "#/definitions/io.argoproj.common.Condition" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + } + }, + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.SlackSender": { + "properties": { + "icon": { + "description": "Icon is the Slack application's icon, e.g. :robot_face: or https://example.com/image.png", + "type": "string" + }, + "username": { + "description": "Username is the Slack application's username", + "type": "string" + } + }, + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.SlackThread": { + "properties": { + "broadcastMessageToChannel": { + "description": "BroadcastMessageToChannel allows also broadcasting the message from the thread to the channel", + "type": "boolean" + }, + "messageAggregationKey": { + "description": "MessageAggregationKey allows aggregating the messages to a thread by some key.", + "type": "string" + } + }, + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.SlackTrigger": { + "description": "SlackTrigger refers to the specification of the Slack notification trigger.", + "properties": { + "attachments": { + "description": "Attachments is a JSON format string that represents an array of Slack attachments according to the attachments API: https://api.slack.com/reference/messaging/attachments .", + "type": "string" + }, + "blocks": { + "description": "Blocks is a JSON format string that represents an array of Slack blocks according to the blocks API: https://api.slack.com/reference/block-kit/blocks .", + "type": "string" + }, + "channel": { + "description": "Channel refers to the Slack channel to send the message to.", + "type": "string" + }, + "message": { + "description": "Message refers to the message to send to the Slack channel.", + "type": "string" + }, + "parameters": { + "description": "Parameters is the list of key-value extracted from event's payload that are applied to the trigger resource.", + "items": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TriggerParameter" + }, + "type": "array" + }, + "sender": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.SlackSender", + "description": "Sender refers to additional configuration of the Slack application that sends the message." + }, + "slackToken": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "SlackToken refers to the Kubernetes secret that holds the Slack token required to send messages." + }, + "thread": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.SlackThread", + "description": "Thread refers to additional options for sending messages to a Slack thread." + } + }, + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.StandardK8STrigger": { + "description": "StandardK8STrigger is the standard Kubernetes resource trigger", + "properties": { + "liveObject": { + "description": "LiveObject specifies whether the resource should be directly fetched from K8s instead of being marshaled from the resource artifact.
If set to true, the resource artifact must contain the information required to uniquely identify the resource in the cluster, that is, you must specify \"apiVersion\", \"kind\" as well as \"name\" and \"namespace\" metadata. Only valid for operation type `update`", + "type": "boolean" + }, + "operation": { + "description": "Operation refers to the type of operation performed on the k8s resource. Default value is Create.", + "type": "string" + }, + "parameters": { + "description": "Parameters is the list of parameters that is applied to the resolved K8s trigger object.", + "items": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TriggerParameter" + }, + "type": "array" + }, + "patchStrategy": { + "description": "PatchStrategy controls the K8s object patching strategy when the trigger operation is specified as patch. Possible values: \"application/json-patch+json\" \"application/merge-patch+json\" \"application/strategic-merge-patch+json\" \"application/apply-patch+yaml\". Defaults to \"application/merge-patch+json\"", + "type": "string" + }, + "source": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.ArtifactLocation", + "description": "Source of the K8s resource file(s)" + } + }, + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.StatusPolicy": { + "description": "StatusPolicy refers to the policy used to check the state of the trigger using response status", + "properties": { + "allow": { + "items": { + "format": "int32", + "type": "integer" + }, + "type": "array" + } + }, + "required": [ + "allow" + ], + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.Template": { + "description": "Template holds the information of a sensor deployment template", + "properties": { + "affinity": { + "$ref": "#/definitions/io.k8s.api.core.v1.Affinity", + "description": "If specified, the pod's scheduling constraints" + }, + "container": { + "$ref": "#/definitions/io.k8s.api.core.v1.Container", + "description": "Container is the main container image to run in the sensor pod" + }, + "imagePullSecrets": { + "description": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "metadata": { + "$ref": "#/definitions/io.argoproj.common.Metadata", + "description": "Metadata sets the pod's metadata, i.e. annotations and labels" + }, + "nodeSelector": { + "additionalProperties": { + "type": "string" + }, + "description": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", + "type": "object" + }, + "priority": { + "description": "The priority value. Various system components use this field to find the priority of the EventSource pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.
More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/", + "format": "int32", + "type": "integer" + }, + "priorityClassName": { + "description": "If specified, indicates the EventSource pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/", + "type": "string" + }, + "securityContext": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodSecurityContext", + "description": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field." + }, + "serviceAccountName": { + "description": "ServiceAccountName is the name of the ServiceAccount to use to run the sensor pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", + "type": "string" + }, + "tolerations": { + "description": "If specified, the pod's tolerations.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Toleration" + }, + "type": "array" + }, + "volumes": { + "description": "Volumes is a list of volumes that can be mounted by containers in a workflow.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Volume" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + } + }, + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.TimeFilter": { + "description": "TimeFilter describes a window in time. It filters out events that occur outside the time limits. In other words, only events that occur after Start and before Stop will pass this filter.", + "properties": { + "start": { + "description": "Start is the beginning of a time window in UTC. Before this time, events for this dependency are ignored. Format is hh:mm:ss.", + "type": "string" + }, + "stop": { + "description": "Stop is the end of a time window in UTC. At or after this time, events for this dependency are ignored. Format is hh:mm:ss. If it is smaller than Start, it is treated as the next day of Start (e.g.: 22:00:00-01:00:00 means 22:00:00-25:00:00).", + "type": "string" + } + }, + "required": [ + "start", + "stop" + ], + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.Trigger": { + "description": "Trigger is an action taken, an output produced, an event created, a message sent", + "properties": { + "atLeastOnce": { + "description": "AtLeastOnce determines the trigger execution semantics. Defaults to false, meaning trigger execution will use at-most-once semantics.
If set to true, trigger execution will switch to at-least-once semantics.", + "type": "boolean" + }, + "parameters": { + "description": "Parameters is the list of parameters applied to the trigger template definition", + "items": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TriggerParameter" + }, + "type": "array" + }, + "policy": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TriggerPolicy", + "description": "Policy to configure backoff and execution criteria for the trigger" + }, + "rateLimit": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.RateLimit", + "description": "Rate limit, default unit is Second" + }, + "retryStrategy": { + "$ref": "#/definitions/io.argoproj.common.Backoff", + "description": "Retry strategy, defaults to no retry" + }, + "template": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TriggerTemplate", + "description": "Template describes the trigger specification." + } + }, + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.TriggerParameter": { + "description": "TriggerParameter indicates a passed parameter to a service template", + "properties": { + "dest": { + "description": "Dest is the JSONPath of a resource key. A path is a series of keys separated by a dot. The colon character can be escaped with '\\'. The -1 key can be used to append a value to an existing array. See https://github.com/tidwall/sjson#path-syntax for more information about how this is used.", + "type": "string" + }, + "operation": { + "description": "Operation is what to do with the existing value at Dest, whether to 'prepend', 'overwrite', or 'append' it.", + "type": "string" + }, + "src": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TriggerParameterSource", + "description": "Src contains a source reference to the value of the parameter from a dependency" + } + }, + "required": [ + "dest" + ], + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.TriggerParameterSource": { + "description": "TriggerParameterSource defines the source for a parameter from an event", + "properties": { + "contextKey": { + "description": "ContextKey is the JSONPath of the event's (JSON decoded) context key ContextKey is a series of keys separated by a dot. A key may contain wildcard characters '*' and '?'. To access an array value use the index as the key. The dot and wildcard characters can be escaped with '\\'. See https://github.com/tidwall/gjson#path-syntax for more information on how to use this.", + "type": "string" + }, + "contextTemplate": { + "description": "ContextTemplate is a go-template for extracting a string from the event's context. If a ContextTemplate is provided with a ContextKey, the template will be evaluated first and fall back to the ContextKey. The templating follows the standard go-template syntax as well as sprig's extra functions. See https://pkg.go.dev/text/template and https://masterminds.github.io/sprig/", + "type": "string" + }, + "dataKey": { + "description": "DataKey is the JSONPath of the event's (JSON decoded) data key DataKey is a series of keys separated by a dot. A key may contain wildcard characters '*' and '?'. To access an array value use the index as the key. The dot and wildcard characters can be escaped with '\\'. See https://github.com/tidwall/gjson#path-syntax for more information on how to use this.", + "type": "string" + }, + "dataTemplate": { + "description": "DataTemplate is a go-template for extracting a string from the event's data.
If a DataTemplate is provided with a DataKey, the template will be evaluated first and fall back to the DataKey. The templating follows the standard go-template syntax as well as sprig's extra functions. See https://pkg.go.dev/text/template and https://masterminds.github.io/sprig/", + "type": "string" + }, + "dependencyName": { + "description": "DependencyName refers to the name of the dependency. The event which is stored for this dependency is used as payload for the parameterization. Make sure to refer to one of the dependencies you have defined under Dependencies list.", + "type": "string" + }, + "useRawData": { + "description": "UseRawData indicates if the value in an event at data key should be used without converting to string. When true, a number, boolean, json or string parameter may be extracted. When the field is unspecified, or explicitly false, the behavior is to turn the extracted field into a string. (e.g. when set to true, the parameter 123 will resolve to the numerical type, but when false, or not provided, the string \"123\" will be resolved)", + "type": "boolean" + }, + "value": { + "description": "Value is the default literal value to use for this parameter source. This is only used if the DataKey is invalid. If the DataKey is invalid and this is not defined, this param source will produce an error.", + "type": "string" + } + }, + "required": [ + "dependencyName" + ], + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.TriggerPolicy": { + "description": "TriggerPolicy dictates the policy for the trigger retries", + "properties": { + "k8s": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.K8SResourcePolicy", + "description": "K8SResourcePolicy refers to the policy used to check the state of K8s based triggers using labels" + }, + "status": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.StatusPolicy", + "description": "Status refers to the policy used to check the state of the trigger using response status" + } + }, + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.TriggerTemplate": { + "description": "TriggerTemplate is the template that describes trigger specification.", + "properties": { + "argoWorkflow": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.ArgoWorkflowTrigger", + "description": "ArgoWorkflow refers to the trigger that can perform various operations on an Argo workflow." + }, + "awsLambda": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.AWSLambdaTrigger", + "description": "AWSLambda refers to the trigger designed to invoke an AWS Lambda function with an on-the-fly constructable payload." + }, + "azureEventHubs": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.AzureEventHubsTrigger", + "description": "AzureEventHubs refers to the trigger that sends an event to an Azure Event Hub." + }, + "azureServiceBus": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.AzureServiceBusTrigger", + "description": "AzureServiceBus refers to the trigger designed to place messages on Azure Service Bus" + }, + "conditions": { + "description": "Conditions is the conditions to execute the trigger.
For example: \"(dep01 || dep02) \u0026\u0026 dep04\"", + "type": "string" + }, + "conditionsReset": { + "description": "Criteria to reset the conditons", + "items": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.ConditionsResetCriteria" + }, + "type": "array" + }, + "custom": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.CustomTrigger", + "description": "CustomTrigger refers to the trigger designed to connect to a gRPC trigger server and execute a custom trigger." + }, + "email": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.EmailTrigger", + "description": "Email refers to the trigger designed to send an email notification" + }, + "http": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.HTTPTrigger", + "description": "HTTP refers to the trigger designed to dispatch a HTTP request with on-the-fly constructable payload." + }, + "k8s": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.StandardK8STrigger", + "description": "StandardK8STrigger refers to the trigger designed to create or update a generic Kubernetes resource." + }, + "kafka": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.KafkaTrigger", + "description": "Kafka refers to the trigger designed to place messages on Kafka topic." + }, + "log": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.LogTrigger", + "description": "Log refers to the trigger designed to invoke log the event." + }, + "name": { + "description": "Name is a unique name of the action to take.", + "type": "string" + }, + "nats": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.NATSTrigger", + "description": "NATS refers to the trigger designed to place message on NATS subject." + }, + "openWhisk": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.OpenWhiskTrigger", + "description": "OpenWhisk refers to the trigger designed to invoke OpenWhisk action." + }, + "pulsar": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.PulsarTrigger", + "description": "Pulsar refers to the trigger designed to place messages on Pulsar topic." + }, + "slack": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.SlackTrigger", + "description": "Slack refers to the trigger designed to send slack notification message." + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "io.argoproj.sensor.v1alpha1.URLArtifact": { + "description": "URLArtifact contains information about an artifact at an http endpoint.", + "properties": { + "path": { + "description": "Path is the complete URL", + "type": "string" + }, + "verifyCert": { + "description": "VerifyCert decides whether the connection is secure or not", + "type": "boolean" + } + }, + "required": [ + "path" + ], + "type": "object" + }, + "io.k8s.api.admissionregistration.v1.MatchCondition": { + "description": "MatchCondition represents a condition which must by fulfilled for a request to be sent to a webhook.", + "properties": { + "expression": { + "description": "Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables:\n\n'object' - The object from the incoming request. The value is null for DELETE requests. 'oldObject' - The existing object. The value is null for CREATE requests. 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). 'authorizer' - A CEL Authorizer. 
May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\nDocumentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/\n\nRequired.", + "type": "string" + }, + "name": { + "description": "Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')\n\nRequired.", + "type": "string" + } + }, + "required": [ + "name", + "expression" + ], + "type": "object" + }, + "io.k8s.api.admissionregistration.v1.MutatingWebhook": { + "description": "MutatingWebhook describes an admission webhook and the resources and operations it applies to.", + "properties": { + "admissionReviewVersions": { + "description": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy.", + "items": { + "type": "string" + }, + "type": "array" + }, + "clientConfig": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.WebhookClientConfig", + "description": "ClientConfig defines how to communicate with the hook. Required" + }, + "failurePolicy": { + "description": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Fail.", + "type": "string" + }, + "matchConditions": { + "description": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.MatchCondition" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "matchPolicy": { + "description": "matchPolicy defines how the \"rules\" list is used to match incoming requests. 
Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\n\nDefaults to \"Equivalent\"", + "type": "string" + }, + "name": { + "description": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", + "type": "string" + }, + "namespaceSelector": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", + "description": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything." + }, + "objectSelector": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", + "description": "ObjectSelector decides whether to run the webhook based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the webhook, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything." + }, + "reinvocationPolicy": { + "description": "reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. 
Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: the webhook will not be called more than once in a single admission evaluation.\n\nIfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.\n\nDefaults to \"Never\".", + "type": "string" + }, + "rules": { + "description": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.RuleWithOperations" + }, + "type": "array" + }, + "sideEffects": { + "description": "SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some.", + "type": "string" + }, + "timeoutSeconds": { + "description": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds.", + "format": "int32", + "type": "integer" + } + }, + "required": [ + "name", + "clientConfig", + "sideEffects", + "admissionReviewVersions" + ], + "type": "object" + }, + "io.k8s.api.admissionregistration.v1.MutatingWebhookConfiguration": { + "description": "MutatingWebhookConfiguration describes the configuration of an admission webhook that accepts or rejects and may change the object.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata." + }, + "webhooks": { + "description": "Webhooks is a list of webhooks and the affected resources and operations.", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.MutatingWebhook" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "admissionregistration.k8s.io", + "kind": "MutatingWebhookConfiguration", + "version": "v1" + } + ] + }, + "io.k8s.api.admissionregistration.v1.MutatingWebhookConfigurationList": { + "description": "MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of MutatingWebhookConfiguration.", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.MutatingWebhookConfiguration" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "admissionregistration.k8s.io", + "kind": "MutatingWebhookConfigurationList", + "version": "v1" + } + ] + }, + "io.k8s.api.admissionregistration.v1.RuleWithOperations": { + "description": "RuleWithOperations is a tuple of Operations and Resources. It is recommended to make sure that all the tuple expansions are valid.", + "properties": { + "apiGroups": { + "description": "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "apiVersions": { + "description": "APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "operations": { + "description": "Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * for all of those operations and any future admission operations that are added. If '*' is present, the length of the slice must be one. 
Required.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "resources": { + "description": "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "scope": { + "description": "scope specifies the scope of this rule. Valid values are \"Cluster\", \"Namespaced\", and \"*\" \"Cluster\" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. \"Namespaced\" means that only namespaced resources will match this rule. \"*\" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is \"*\".", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.admissionregistration.v1.ServiceReference": { + "description": "ServiceReference holds a reference to Service.legacy.k8s.io", + "properties": { + "name": { + "description": "`name` is the name of the service. Required", + "type": "string" + }, + "namespace": { + "description": "`namespace` is the namespace of the service. Required", + "type": "string" + }, + "path": { + "description": "`path` is an optional URL path which will be sent in any request to this service.", + "type": "string" + }, + "port": { + "description": "If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive).", + "format": "int32", + "type": "integer" + } + }, + "required": [ + "namespace", + "name" + ], + "type": "object" + }, + "io.k8s.api.admissionregistration.v1.ValidatingWebhook": { + "description": "ValidatingWebhook describes an admission webhook and the resources and operations it applies to.", + "properties": { + "admissionReviewVersions": { + "description": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy.", + "items": { + "type": "string" + }, + "type": "array" + }, + "clientConfig": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.WebhookClientConfig", + "description": "ClientConfig defines how to communicate with the hook. Required" + }, + "failurePolicy": { + "description": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Fail.", + "type": "string" + }, + "matchConditions": { + "description": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. 
An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.MatchCondition" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "matchPolicy": { + "description": "matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\n\nDefaults to \"Equivalent\"", + "type": "string" + }, + "name": { + "description": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", + "type": "string" + }, + "namespaceSelector": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", + "description": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything." 
+ }, + "objectSelector": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", + "description": "ObjectSelector decides whether to run the webhook based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the webhook, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything." + }, + "rules": { + "description": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.RuleWithOperations" + }, + "type": "array" + }, + "sideEffects": { + "description": "SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some.", + "type": "string" + }, + "timeoutSeconds": { + "description": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds.", + "format": "int32", + "type": "integer" + } + }, + "required": [ + "name", + "clientConfig", + "sideEffects", + "admissionReviewVersions" + ], + "type": "object" + }, + "io.k8s.api.admissionregistration.v1.ValidatingWebhookConfiguration": { + "description": "ValidatingWebhookConfiguration describes the configuration of an admission webhook that accepts or rejects an object without changing it.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata." + }, + "webhooks": { + "description": "Webhooks is a list of webhooks and the affected resources and operations.", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.ValidatingWebhook" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "admissionregistration.k8s.io", + "kind": "ValidatingWebhookConfiguration", + "version": "v1" + } + ] + }, + "io.k8s.api.admissionregistration.v1.ValidatingWebhookConfigurationList": { + "description": "ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of ValidatingWebhookConfiguration.", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.ValidatingWebhookConfiguration" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "admissionregistration.k8s.io", + "kind": "ValidatingWebhookConfigurationList", + "version": "v1" + } + ] + }, + "io.k8s.api.admissionregistration.v1.WebhookClientConfig": { + "description": "WebhookClientConfig contains the information to make a TLS connection with the webhook", + "properties": { + "caBundle": { + "description": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.", + "format": "byte", + "type": "string" + }, + "service": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.ServiceReference", + "description": "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`." + }, + "url": { + "description": "`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. 
The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.admissionregistration.v1alpha1.AuditAnnotation": { + "description": "AuditAnnotation describes how to produce an audit annotation for an API request.", + "properties": { + "key": { + "description": "key specifies the audit annotation key. The audit annotation keys of a ValidatingAdmissionPolicy must be unique. The key must be a qualified name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.\n\nThe key is combined with the resource name of the ValidatingAdmissionPolicy to construct an audit annotation key: \"{ValidatingAdmissionPolicy name}/{key}\".\n\nIf an admission webhook uses the same resource name as this ValidatingAdmissionPolicy and the same audit annotation key, the annotation key will be identical. In this case, the first annotation written with the key will be included in the audit event and all subsequent annotations with the same key will be discarded.\n\nRequired.", + "type": "string" + }, + "valueExpression": { + "description": "valueExpression represents the expression which is evaluated by CEL to produce an audit annotation value. The expression must evaluate to either a string or null value. If the expression evaluates to a string, the audit annotation is included with the string value. If the expression evaluates to null or empty string the audit annotation will be omitted. The valueExpression may be no longer than 5kb in length. If the result of the valueExpression is more than 10kb in length, it will be truncated to 10kb.\n\nIf multiple ValidatingAdmissionPolicyBinding resources match an API request, then the valueExpression will be evaluated for each binding. All unique values produced by the valueExpressions will be joined together in a comma-separated list.\n\nRequired.", + "type": "string" + } + }, + "required": [ + "key", + "valueExpression" + ], + "type": "object" + }, + "io.k8s.api.admissionregistration.v1alpha1.ExpressionWarning": { + "description": "ExpressionWarning is a warning information that targets a specific expression.", + "properties": { + "fieldRef": { + "description": "The path to the field that refers the expression. For example, the reference to the expression of the first item of validations is \"spec.validations[0].expression\"", + "type": "string" + }, + "warning": { + "description": "The content of type checking information in a human-readable form. 
Each line of the warning contains the type that the expression is checked against, followed by the type check error from the compiler.", + "type": "string" + } + }, + "required": [ + "fieldRef", + "warning" + ], + "type": "object" + }, + "io.k8s.api.admissionregistration.v1alpha1.MatchCondition": { + "properties": { + "expression": { + "description": "Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables:\n\n'object' - The object from the incoming request. The value is null for DELETE requests. 'oldObject' - The existing object. The value is null for CREATE requests. 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\nDocumentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/\n\nRequired.", + "type": "string" + }, + "name": { + "description": "Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')\n\nRequired.", + "type": "string" + } + }, + "required": [ + "name", + "expression" + ], + "type": "object" + }, + "io.k8s.api.admissionregistration.v1alpha1.MatchResources": { + "description": "MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)", + "properties": { + "excludeResourceRules": { + "description": "ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1alpha1.NamedRuleWithOperations" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "matchPolicy": { + "description": "matchPolicy defines how the \"MatchResources\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. 
For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\n\nDefaults to \"Equivalent\"", + "type": "string" + }, + "namespaceSelector": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", + "description": "NamespaceSelector decides whether to run the admission control policy on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the policy.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the policy on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything." + }, + "objectSelector": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", + "description": "ObjectSelector decides whether to run the validation based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the cel validation, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything." + }, + "resourceRules": { + "description": "ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule.", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1alpha1.NamedRuleWithOperations" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.admissionregistration.v1alpha1.NamedRuleWithOperations": { + "description": "NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.", + "properties": { + "apiGroups": { + "description": "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "apiVersions": { + "description": "APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. 
Required.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "operations": { + "description": "Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * for all of those operations and any future admission operations that are added. If '*' is present, the length of the slice must be one. Required.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "resourceNames": { + "description": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "resources": { + "description": "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "scope": { + "description": "scope specifies the scope of this rule. Valid values are \"Cluster\", \"Namespaced\", and \"*\" \"Cluster\" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. \"Namespaced\" means that only namespaced resources will match this rule. \"*\" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is \"*\".", + "type": "string" + } + }, + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.admissionregistration.v1alpha1.ParamKind": { + "description": "ParamKind is a tuple of Group Kind and Version.", + "properties": { + "apiVersion": { + "description": "APIVersion is the API group version the resources belong to. In format of \"group/version\". Required.", + "type": "string" + }, + "kind": { + "description": "Kind is the API kind the resources belong to. Required.", + "type": "string" + } + }, + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.admissionregistration.v1alpha1.ParamRef": { + "description": "ParamRef describes how to locate the params to be used as input to expressions of rules applied by a policy binding.", + "properties": { + "name": { + "description": "`name` is the name of the resource being referenced.\n\n`name` and `selector` are mutually exclusive properties. If one is set, the other must be unset.", + "type": "string" + }, + "namespace": { + "description": "namespace is the namespace of the referenced resource. Allows limiting the search for params to a specific namespace. Applies to both `name` and `selector` fields.\n\nA per-namespace parameter may be used by specifying a namespace-scoped `paramKind` in the policy and leaving this field empty.\n\n- If `paramKind` is cluster-scoped, this field MUST be unset. Setting this field results in a configuration error.\n\n- If `paramKind` is namespace-scoped, the namespace of the object being evaluated for admission will be used when this field is left unset. 
Take care that if this is left empty, the binding must not match any cluster-scoped resources, as that would result in an error.", + "type": "string" + }, + "parameterNotFoundAction": { + "description": "`parameterNotFoundAction` controls the behavior of the binding when the resource exists, and name or selector is valid, but there are no parameters matched by the binding. If the value is set to `Allow`, then no matched parameters will be treated as successful validation by the binding. If set to `Deny`, then no matched parameters will be subject to the `failurePolicy` of the policy.\n\nAllowed values are `Allow` or `Deny`. Defaults to `Deny`.", + "type": "string" + }, + "selector": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", + "description": "selector can be used to match multiple param objects based on their labels. Supply selector: {} to match all resources of the ParamKind.\n\nIf multiple params are found, they are all evaluated with the policy expressions and the results are ANDed together.\n\nOne of `name` or `selector` must be set, but `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset." + } + }, + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.admissionregistration.v1alpha1.TypeChecking": { + "description": "TypeChecking contains results of type checking the expressions in the ValidatingAdmissionPolicy", + "properties": { + "expressionWarnings": { + "description": "The type checking warnings for each expression.", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1alpha1.ExpressionWarning" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "type": "object" + }, + "io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicy": { + "description": "ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata." + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicySpec", + "description": "Specification of the desired behavior of the ValidatingAdmissionPolicy." + }, + "status": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyStatus", + "description": "The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy behaves in the expected way. Populated by the system. Read-only."
+ } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "admissionregistration.k8s.io", + "kind": "ValidatingAdmissionPolicy", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBinding": { + "description": "ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with parameterized resources. ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters.\n\nFor a given admission request, each binding will cause its policy to be evaluated N times, where N is 1 for policies/bindings that don't use params, otherwise N is the number of parameters selected by the binding.\n\nThe CEL expressions of a policy must have a computed CEL cost below the maximum CEL budget. Each evaluation of the policy is given an independent CEL cost budget. Adding/removing policies, bindings, or params can not affect whether a given (policy, binding, param) combination is within its own CEL budget.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata." + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBindingSpec", + "description": "Specification of the desired behavior of the ValidatingAdmissionPolicyBinding." + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "admissionregistration.k8s.io", + "kind": "ValidatingAdmissionPolicyBinding", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBindingList": { + "description": "ValidatingAdmissionPolicyBindingList is a list of ValidatingAdmissionPolicyBinding.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of ValidatingAdmissionPolicyBinding.", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBinding" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "admissionregistration.k8s.io", + "kind": "ValidatingAdmissionPolicyBindingList", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBindingSpec": { + "description": "ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding.", + "properties": { + "matchResources": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1alpha1.MatchResources", + "description": "MatchResources declares what resources match this binding and will be validated by it. Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this. If this is unset, all resources matched by the policy are validated by this binding. When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated. Note that this differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required." + }, + "paramRef": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1alpha1.ParamRef", + "description": "paramRef specifies the parameter resource used to configure the admission control policy. It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy. If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy is applied. If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param." + }, + "policyName": { + "description": "PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored. Required.", + "type": "string" + }, + "validationActions": { + "description": "validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions.\n\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\n\nvalidationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action.\n\nThe supported actions values are:\n\n\"Deny\" specifies that a validation failure results in a denied request.\n\n\"Warn\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses.\n\n\"Audit\" specifies that a validation failure is included in the published audit event for the request.
The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\"validation.policy.admission.k8s.io/validation_failure\": \"[{\"message\": \"Invalid value\", \"policy\": \"policy.example.com\", \"binding\": \"policybinding.example.com\", \"expressionIndex\": \"1\", \"validationActions\": [\"Audit\"]}]\"`\n\nClients should expect to handle additional values by ignoring any values not recognized.\n\n\"Deny\" and \"Warn\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\n\nRequired.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "set" + } + }, + "type": "object" + }, + "io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyList": { + "description": "ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of ValidatingAdmissionPolicy.", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicy" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "admissionregistration.k8s.io", + "kind": "ValidatingAdmissionPolicyList", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicySpec": { + "description": "ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy.", + "properties": { + "auditAnnotations": { + "description": "auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request.
validations and auditAnnotations may not both be empty; at least one of validations or auditAnnotations is required.", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1alpha1.AuditAnnotation" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "failurePolicy": { + "description": "failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\n\nA policy is invalid if spec.paramKind refers to a non-existent Kind. A binding is invalid if spec.paramRef.name refers to a non-existent resource.\n\nfailurePolicy does not define how validations that evaluate to false are handled.\n\nWhen failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions define how failures are enforced.\n\nAllowed values are Ignore or Fail. Defaults to Fail.", + "type": "string" + }, + "matchConditions": { + "description": "MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the policy is skipped", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1alpha1.MatchCondition" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "matchConstraints": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1alpha1.MatchResources", + "description": "MatchConstraints specifies what resources this policy is designed to validate. The AdmissionPolicy cares about a request if it matches _all_ Constraints. However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API, ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding. Required." + }, + "paramKind": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1alpha1.ParamKind", + "description": "ParamKind specifies the kind of resources used to parameterize this policy. If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null." + }, + "validations": { + "description": "Validations contain CEL expressions which are used to apply the validation.
Validations and AuditAnnotations may not both be empty; at least one Validation or AuditAnnotation is required.", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1alpha1.Validation" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "variables": { + "description": "Variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except MatchConditions because MatchConditions are evaluated before the rest of the policy.\n\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, Variables must be sorted by the order of first appearance and acyclic.", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1alpha1.Variable" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + } + }, + "type": "object" + }, + "io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyStatus": { + "description": "ValidatingAdmissionPolicyStatus represents the status of a ValidatingAdmissionPolicy.", + "properties": { + "conditions": { + "description": "The conditions represent the latest available observations of a policy's current state.", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map" + }, + "observedGeneration": { + "description": "The generation observed by the controller.", + "format": "int64", + "type": "integer" + }, + "typeChecking": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1alpha1.TypeChecking", + "description": "The results of type checking for each expression. Presence of this field indicates the completion of the type checking." + } + }, + "type": "object" + }, + "io.k8s.api.admissionregistration.v1alpha1.Validation": { + "description": "Validation specifies the CEL expression which is used to apply the validation.", + "properties": { + "expression": { + "description": "Expression represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\n For example, a variable named 'foo' can be accessed as 'variables.foo'.\n- 'authorizer' - A CEL Authorizer.
May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\n\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible.\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:\n\t \"true\", \"false\", \"null\", \"in\", \"as\", \"break\", \"const\", \"continue\", \"else\", \"for\", \"function\", \"if\",\n\t \"import\", \"let\", \"loop\", \"package\", \"namespace\", \"return\".\nExamples:\n - Expression accessing a property named \"namespace\": {\"Expression\": \"object.__namespace__ \u003e 0\"}\n - Expression accessing a property named \"x-prop\": {\"Expression\": \"object.x__dash__prop \u003e 0\"}\n - Expression accessing a property named \"redact__d\": {\"Expression\": \"object.redact__underscores__d \u003e 0\"}\n\nEquality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type uses the semantics of the list type:\n - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and\n non-intersecting elements in `Y` are appended, retaining their partial order.\n - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values\n are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with\n non-intersecting keys are appended, retaining their partial order.\nRequired.", + "type": "string" + }, + "message": { + "description": "Message represents the message displayed when validation fails. The message is required if the Expression contains line breaks, and must not itself contain line breaks. If unset, the message is \"failed Expression: {Expression}\". e.g. \"must be a URL with the host matching spec.host\"", + "type": "string" + }, + "messageExpression": { + "description": "messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged.
messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. Example: \"object.x must be less than max (\"+string(params.max)+\")\"", + "type": "string" + }, + "reason": { + "description": "Reason represents a machine-readable description of why this validation failed. If this is the first validation in the list to fail, this reason, as well as the corresponding HTTP response code, are used in the HTTP response to the client. The currently supported reasons are: \"Unauthorized\", \"Forbidden\", \"Invalid\", \"RequestEntityTooLarge\". If not set, StatusReasonInvalid is used in the response to the client.", + "type": "string" + } + }, + "required": [ + "expression" + ], + "type": "object" + }, + "io.k8s.api.admissionregistration.v1alpha1.Variable": { + "description": "Variable is the definition of a variable that is used for composition.", + "properties": { + "expression": { + "description": "Expression is the expression that will be evaluated as the value of the variable. The CEL expression has access to the same identifiers as the CEL expressions in Validation.", + "type": "string" + }, + "name": { + "description": "Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. The variable can be accessed in other expressions through `variables`. For example, if name is \"foo\", the variable will be available as `variables.foo`.", + "type": "string" + } + }, + "required": [ + "name", + "expression" + ], + "type": "object" + }, + "io.k8s.api.admissionregistration.v1beta1.AuditAnnotation": { + "description": "AuditAnnotation describes how to produce an audit annotation for an API request.", + "properties": { + "key": { + "description": "key specifies the audit annotation key. The audit annotation keys of a ValidatingAdmissionPolicy must be unique. The key must be a qualified name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.\n\nThe key is combined with the resource name of the ValidatingAdmissionPolicy to construct an audit annotation key: \"{ValidatingAdmissionPolicy name}/{key}\".\n\nIf an admission webhook uses the same resource name as this ValidatingAdmissionPolicy and the same audit annotation key, the annotation key will be identical. In this case, the first annotation written with the key will be included in the audit event and all subsequent annotations with the same key will be discarded.\n\nRequired.", + "type": "string" + }, + "valueExpression": { + "description": "valueExpression represents the expression which is evaluated by CEL to produce an audit annotation value. The expression must evaluate to either a string or null value. If the expression evaluates to a string, the audit annotation is included with the string value. If the expression evaluates to null or empty string the audit annotation will be omitted. The valueExpression may be no longer than 5kb in length. If the result of the valueExpression is more than 10kb in length, it will be truncated to 10kb.\n\nIf multiple ValidatingAdmissionPolicyBinding resources match an API request, then the valueExpression will be evaluated for each binding.
All unique values produced by the valueExpressions will be joined together in a comma-separated list.\n\nRequired.", + "type": "string" + } + }, + "required": [ + "key", + "valueExpression" + ], + "type": "object" + }, + "io.k8s.api.admissionregistration.v1beta1.ExpressionWarning": { + "description": "ExpressionWarning is a warning that targets a specific expression.", + "properties": { + "fieldRef": { + "description": "The path to the field that refers to the expression. For example, the reference to the expression of the first item of validations is \"spec.validations[0].expression\"", + "type": "string" + }, + "warning": { + "description": "The content of type checking information in a human-readable form. Each line of the warning contains the type that the expression is checked against, followed by the type check error from the compiler.", + "type": "string" + } + }, + "required": [ + "fieldRef", + "warning" + ], + "type": "object" + }, + "io.k8s.api.admissionregistration.v1beta1.MatchCondition": { + "description": "MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook.", + "properties": { + "expression": { + "description": "Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables:\n\n'object' - The object from the incoming request. The value is null for DELETE requests. 'oldObject' - The existing object. The value is null for CREATE requests. 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\nDocumentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/\n\nRequired.", + "type": "string" + }, + "name": { + "description": "Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')\n\nRequired.", + "type": "string" + } + }, + "required": [ + "name", + "expression" + ], + "type": "object" + }, + "io.k8s.api.admissionregistration.v1beta1.MatchResources": { + "description": "MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)", + "properties": { + "excludeResourceRules": { + "description": "ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about.
The exclude rules take precedence over include rules (if a resource matches both, it is excluded)", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1beta1.NamedRuleWithOperations" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "matchPolicy": { + "description": "matchPolicy defines how the \"MatchResources\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\n\n- Equivalent: match a request if it modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\n\nDefaults to \"Equivalent\"", + "type": "string" + }, + "namespaceSelector": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", + "description": "NamespaceSelector decides whether to run the admission control policy on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the policy.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the policy on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything." + }, + "objectSelector": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", + "description": "ObjectSelector decides whether to run the validation based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the cel validation, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything." + }, + "resourceRules": { + "description": "ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches.
The policy cares about an operation if it matches _any_ Rule.", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1beta1.NamedRuleWithOperations" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.admissionregistration.v1beta1.NamedRuleWithOperations": { + "description": "NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.", + "properties": { + "apiGroups": { + "description": "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "apiVersions": { + "description": "APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "operations": { + "description": "Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * for all of those operations and any future admission operations that are added. If '*' is present, the length of the slice must be one. Required.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "resourceNames": { + "description": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "resources": { + "description": "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "scope": { + "description": "scope specifies the scope of this rule. Valid values are \"Cluster\", \"Namespaced\", and \"*\". \"Cluster\" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. \"Namespaced\" means that only namespaced resources will match this rule. \"*\" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is \"*\".", + "type": "string" + } + }, + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.admissionregistration.v1beta1.ParamKind": { + "description": "ParamKind is a tuple of Group Kind and Version.", + "properties": { + "apiVersion": { + "description": "APIVersion is the API group version the resources belong to. In format of \"group/version\". Required.", + "type": "string" + }, + "kind": { + "description": "Kind is the API kind the resources belong to.
Required.", + "type": "string" + } + }, + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.admissionregistration.v1beta1.ParamRef": { + "description": "ParamRef describes how to locate the params to be used as input to expressions of rules applied by a policy binding.", + "properties": { + "name": { + "description": "name is the name of the resource being referenced.\n\nOne of `name` or `selector` must be set, but `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset.\n\nA single parameter used for all admission requests can be configured by setting the `name` field, leaving `selector` blank, and setting namespace if `paramKind` is namespace-scoped.", + "type": "string" + }, + "namespace": { + "description": "namespace is the namespace of the referenced resource. Allows limiting the search for params to a specific namespace. Applies to both `name` and `selector` fields.\n\nA per-namespace parameter may be used by specifying a namespace-scoped `paramKind` in the policy and leaving this field empty.\n\n- If `paramKind` is cluster-scoped, this field MUST be unset. Setting this field results in a configuration error.\n\n- If `paramKind` is namespace-scoped, the namespace of the object being evaluated for admission will be used when this field is left unset. Take care that if this is left empty the binding must not match any cluster-scoped resources, which will result in an error.", + "type": "string" + }, + "parameterNotFoundAction": { + "description": "`parameterNotFoundAction` controls the behavior of the binding when the resource exists, and name or selector is valid, but there are no parameters matched by the binding. If the value is set to `Allow`, then no matched parameters will be treated as successful validation by the binding. If set to `Deny`, then no matched parameters will be subject to the `failurePolicy` of the policy.\n\nAllowed values are `Allow` or `Deny`\n\nRequired", + "type": "string" + }, + "selector": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", + "description": "selector can be used to match multiple param objects based on their labels. Supply selector: {} to match all resources of the ParamKind.\n\nIf multiple params are found, they are all evaluated with the policy expressions and the results are ANDed together.\n\nOne of `name` or `selector` must be set, but `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset." + } + }, + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.admissionregistration.v1beta1.TypeChecking": { + "description": "TypeChecking contains results of type checking the expressions in the ValidatingAdmissionPolicy", + "properties": { + "expressionWarnings": { + "description": "The type checking warnings for each expression.", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1beta1.ExpressionWarning" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "type": "object" + }, + "io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicy": { + "description": "ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata." + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicySpec", + "description": "Specification of the desired behavior of the ValidatingAdmissionPolicy." + }, + "status": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyStatus", + "description": "The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy behaves in the expected way. Populated by the system. Read-only." + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "admissionregistration.k8s.io", + "kind": "ValidatingAdmissionPolicy", + "version": "v1beta1" + } + ] + }, + "io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBinding": { + "description": "ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with parameterized resources. ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters.\n\nFor a given admission request, each binding will cause its policy to be evaluated N times, where N is 1 for policies/bindings that don't use params, otherwise N is the number of parameters selected by the binding.\n\nThe CEL expressions of a policy must have a computed CEL cost below the maximum CEL budget. Each evaluation of the policy is given an independent CEL cost budget. Adding/removing policies, bindings, or params can not affect whether a given (policy, binding, param) combination is within its own CEL budget.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata." + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBindingSpec", + "description": "Specification of the desired behavior of the ValidatingAdmissionPolicyBinding."
+ } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "admissionregistration.k8s.io", + "kind": "ValidatingAdmissionPolicyBinding", + "version": "v1beta1" + } + ] + }, + "io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBindingList": { + "description": "ValidatingAdmissionPolicyBindingList is a list of ValidatingAdmissionPolicyBinding.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of ValidatingAdmissionPolicyBinding.", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBinding" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "admissionregistration.k8s.io", + "kind": "ValidatingAdmissionPolicyBindingList", + "version": "v1beta1" + } + ] + }, + "io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBindingSpec": { + "description": "ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding.", + "properties": { + "matchResources": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1beta1.MatchResources", + "description": "MatchResources declares what resources match this binding and will be validated by it. Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this. If this is unset, all resources matched by the policy are validated by this binding. When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated. Note that this differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required." + }, + "paramRef": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1beta1.ParamRef", + "description": "paramRef specifies the parameter resource used to configure the admission control policy. It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy. If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy is applied. If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param." + }, + "policyName": { + "description": "PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to.
If the referenced resource does not exist, this binding is considered invalid and will be ignored. Required.", + "type": "string" + }, + "validationActions": { + "description": "validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions.\n\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\n\nvalidationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action.\n\nThe supported actions values are:\n\n\"Deny\" specifies that a validation failure results in a denied request.\n\n\"Warn\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses.\n\n\"Audit\" specifies that a validation failure is included in the published audit event for the request. The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\"validation.policy.admission.k8s.io/validation_failure\": \"[{\"message\": \"Invalid value\", \"policy\": \"policy.example.com\", \"binding\": \"policybinding.example.com\", \"expressionIndex\": \"1\", \"validationActions\": [\"Audit\"]}]\"`\n\nClients should expect to handle additional values by ignoring any values not recognized.\n\n\"Deny\" and \"Warn\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\n\nRequired.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "set" + } + }, + "type": "object" + }, + "io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyList": { + "description": "ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of ValidatingAdmissionPolicy.", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicy" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "admissionregistration.k8s.io", + "kind": "ValidatingAdmissionPolicyList", + "version": "v1beta1" + } + ] + }, + "io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicySpec": { + "description": "ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy.", + "properties": { + "auditAnnotations": { + "description": "auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. validations and auditAnnotations may not both be empty; at least one of validations or auditAnnotations is required.", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1beta1.AuditAnnotation" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "failurePolicy": { + "description": "failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\n\nA policy is invalid if spec.paramKind refers to a non-existent Kind. A binding is invalid if spec.paramRef.name refers to a non-existent resource.\n\nfailurePolicy does not define how validations that evaluate to false are handled.\n\nWhen failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions define how failures are enforced.\n\nAllowed values are Ignore or Fail. Defaults to Fail.", + "type": "string" + }, + "matchConditions": { + "description": "MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the policy is skipped", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1beta1.MatchCondition" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "matchConstraints": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1beta1.MatchResources", + "description": "MatchConstraints specifies what resources this policy is designed to validate. The AdmissionPolicy cares about a request if it matches _all_ Constraints. However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API, ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding.
Required." + }, + "paramKind": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1beta1.ParamKind", + "description": "ParamKind specifies the kind of resources used to parameterize this policy. If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null." + }, + "validations": { + "description": "Validations contain CEL expressions which is used to apply the validation. Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is required.", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1beta1.Validation" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "variables": { + "description": "Variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except MatchConditions because MatchConditions are evaluated before the rest of the policy.\n\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, Variables must be sorted by the order of first appearance and acyclic.", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1beta1.Variable" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + } + }, + "type": "object" + }, + "io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyStatus": { + "description": "ValidatingAdmissionPolicyStatus represents the status of an admission validation policy.", + "properties": { + "conditions": { + "description": "The conditions represent the latest available observations of a policy's current state.", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map" + }, + "observedGeneration": { + "description": "The generation observed by the controller.", + "format": "int64", + "type": "integer" + }, + "typeChecking": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1beta1.TypeChecking", + "description": "The results of type checking for each expression. Presence of this field indicates the completion of the type checking." + } + }, + "type": "object" + }, + "io.k8s.api.admissionregistration.v1beta1.Validation": { + "description": "Validation specifies the CEL expression which is used to apply the validation.", + "properties": { + "expression": { + "description": "Expression represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). 
- 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\n For example, a variable named 'foo' can be accessed as 'variables.foo'.\n- 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\n\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible.\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:\n\t \"true\", \"false\", \"null\", \"in\", \"as\", \"break\", \"const\", \"continue\", \"else\", \"for\", \"function\", \"if\",\n\t \"import\", \"let\", \"loop\", \"package\", \"namespace\", \"return\".\nExamples:\n - Expression accessing a property named \"namespace\": {\"Expression\": \"object.__namespace__ \u003e 0\"}\n - Expression accessing a property named \"x-prop\": {\"Expression\": \"object.x__dash__prop \u003e 0\"}\n - Expression accessing a property named \"redact__d\": {\"Expression\": \"object.redact__underscores__d \u003e 0\"}\n\nEquality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:\n - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and\n non-intersecting elements in `Y` are appended, retaining their partial order.\n - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values\n are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with\n non-intersecting keys are appended, retaining their partial order.\nRequired.", + "type": "string" + }, + "message": { + "description": "Message represents the message displayed when validation fails. Message is required if the Expression contains line breaks, and the message must not itself contain line breaks. If unset, the message is \"failed Expression: {Expression}\". e.g. \"must be a URL with the host matching spec.host\"", + "type": "string" + }, + "messageExpression": { + "description": "messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails.
If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. Example: \"object.x must be less than max (\"+string(params.max)+\")\"", + "type": "string" + }, + "reason": { + "description": "Reason represents a machine-readable description of why this validation failed. If this is the first validation in the list to fail, this reason, as well as the corresponding HTTP response code, are used in the HTTP response to the client. The currently supported reasons are: \"Unauthorized\", \"Forbidden\", \"Invalid\", \"RequestEntityTooLarge\". If not set, StatusReasonInvalid is used in the response to the client.", + "type": "string" + } + }, + "required": [ + "expression" + ], + "type": "object" + }, + "io.k8s.api.admissionregistration.v1beta1.Variable": { + "description": "Variable is the definition of a variable that is used for composition. A variable is defined as a named expression.", + "properties": { + "expression": { + "description": "Expression is the expression that will be evaluated as the value of the variable. The CEL expression has access to the same identifiers as the CEL expressions in Validation.", + "type": "string" + }, + "name": { + "description": "Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. The variable can be accessed in other expressions through `variables`. For example, if name is \"foo\", the variable will be available as `variables.foo`.", + "type": "string" + } + }, + "required": [ + "name", + "expression" + ], + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.apiserverinternal.v1alpha1.ServerStorageVersion": { + "description": "An API server instance reports the version it can decode and the version it encodes objects to when persisting objects in the backend.", + "properties": { + "apiServerID": { + "description": "The ID of the reporting API server.", + "type": "string" + }, + "decodableVersions": { + "description": "The API server can decode objects encoded in these versions. The encodingVersion must be included in the decodableVersions.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "set" + }, + "encodingVersion": { + "description": "The API server encodes the object to this version when persisting it in the backend (e.g., etcd).", + "type": "string" + }, + "servedVersions": { + "description": "The API server can serve these versions. DecodableVersions must include all ServedVersions.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "set" + } + }, + "type": "object" + }, + "io.k8s.api.apiserverinternal.v1alpha1.StorageVersion": { + "description": "Storage version of a specific resource.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values.
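Putting the policy-side definitions together (matchConstraints with its required resourceRules, a composed variable, and a validation that uses it), a hedged sketch with the same v1beta1 Go types might look like this; the policy name, rule, and expressions are invented for illustration.

```go
package main

import (
	"fmt"

	admissionv1beta1 "k8s.io/api/admissionregistration/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/yaml"
)

func main() {
	fail := admissionv1beta1.Fail // the documented default failurePolicy

	policy := admissionv1beta1.ValidatingAdmissionPolicy{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "admissionregistration.k8s.io/v1beta1",
			Kind:       "ValidatingAdmissionPolicy",
		},
		ObjectMeta: metav1.ObjectMeta{Name: "demo-policy"},
		Spec: admissionv1beta1.ValidatingAdmissionPolicySpec{
			FailurePolicy: &fail,
			// matchConstraints is required; unlike a binding's matchResources,
			// resourceRules must be set here.
			MatchConstraints: &admissionv1beta1.MatchResources{
				ResourceRules: []admissionv1beta1.NamedRuleWithOperations{{
					RuleWithOperations: admissionv1beta1.RuleWithOperations{
						Operations: []admissionv1beta1.OperationType{admissionv1beta1.Create, admissionv1beta1.Update},
						Rule: admissionv1beta1.Rule{
							APIGroups:   []string{"apps"},
							APIVersions: []string{"v1"},
							Resources:   []string{"deployments"},
						},
					},
				}},
			},
			// Variables are evaluated lazily and may only refer to variables
			// defined earlier in the list.
			Variables: []admissionv1beta1.Variable{
				{Name: "replicas", Expression: "object.spec.replicas"},
			},
			// At least one of validations or auditAnnotations is required.
			Validations: []admissionv1beta1.Validation{{
				Expression: "variables.replicas <= 5",
				Message:    "replica count must not exceed 5",
			}},
		},
	}
	out, _ := yaml.Marshal(policy)
	fmt.Print(string(out))
}
```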
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "The name is \u003cgroup\u003e.\u003cresource\u003e." + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.apiserverinternal.v1alpha1.StorageVersionSpec", + "description": "Spec is an empty spec. It is here to comply with Kubernetes API style." + }, + "status": { + "$ref": "#/definitions/io.k8s.api.apiserverinternal.v1alpha1.StorageVersionStatus", + "description": "API server instances report the version they can decode and the version they encode objects to when persisting objects in the backend." + } + }, + "required": [ + "spec", + "status" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "internal.apiserver.k8s.io", + "kind": "StorageVersion", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.apiserverinternal.v1alpha1.StorageVersionCondition": { + "description": "Describes the state of the storageVersion at a certain point.", + "properties": { + "lastTransitionTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "Last time the condition transitioned from one status to another." + }, + "message": { + "description": "A human readable message indicating details about the transition.", + "type": "string" + }, + "observedGeneration": { + "description": "If set, this represents the .metadata.generation that the condition was set based upon.", + "format": "int64", + "type": "integer" + }, + "reason": { + "description": "The reason for the condition's last transition.", + "type": "string" + }, + "status": { + "description": "Status of the condition, one of True, False, Unknown.", + "type": "string" + }, + "type": { + "description": "Type of the condition.", + "type": "string" + } + }, + "required": [ + "type", + "status", + "reason" + ], + "type": "object" + }, + "io.k8s.api.apiserverinternal.v1alpha1.StorageVersionList": { + "description": "A list of StorageVersions.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items holds a list of StorageVersion", + "items": { + "$ref": "#/definitions/io.k8s.api.apiserverinternal.v1alpha1.StorageVersion" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "internal.apiserver.k8s.io", + "kind": "StorageVersionList", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.apiserverinternal.v1alpha1.StorageVersionSpec": { + "description": "StorageVersionSpec is an empty spec.", + "type": "object" + }, + "io.k8s.api.apiserverinternal.v1alpha1.StorageVersionStatus": { + "description": "API server instances report the versions they can decode and the version they encode objects to when persisting objects in the backend.", + "properties": { + "commonEncodingVersion": { + "description": "If all API server instances agree on the same encoding storage version, then this field is set to that version. Otherwise this field is left empty. Each API server should finish updating its storageVersionStatus entry before serving write operations, so that this field stays in sync with reality.", + "type": "string" + }, + "conditions": { + "description": "The latest available observations of the storageVersion's state.", + "items": { + "$ref": "#/definitions/io.k8s.api.apiserverinternal.v1alpha1.StorageVersionCondition" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map" + }, + "storageVersions": { + "description": "The reported versions per API server instance.", + "items": { + "$ref": "#/definitions/io.k8s.api.apiserverinternal.v1alpha1.ServerStorageVersion" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "apiServerID" + ], + "x-kubernetes-list-type": "map" + } + }, + "type": "object" + }, + "io.k8s.api.apps.v1.ControllerRevision": { + "description": "ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it cannot be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "data": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.runtime.RawExtension", + "description": "Data is the serialized representation of the state." + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata.
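The storage-version types above are an internal API populated by API servers themselves, but the invariants are easier to see on a populated object. A sketch, assuming the `k8s.io/api/apiserverinternal/v1alpha1` types that correspond to these definitions (the server ID and versions are made up):

```go
package main

import (
	"fmt"

	internalv1alpha1 "k8s.io/api/apiserverinternal/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	common := "apps/v1"

	// Status as a single API server instance might report it. The schema's
	// invariants: encodingVersion must appear in decodableVersions, and
	// decodableVersions must include all servedVersions.
	sv := internalv1alpha1.StorageVersion{
		// The object name is <group>.<resource>.
		ObjectMeta: metav1.ObjectMeta{Name: "apps.deployments"},
		Spec:       internalv1alpha1.StorageVersionSpec{}, // intentionally empty, per the schema
		Status: internalv1alpha1.StorageVersionStatus{
			StorageVersions: []internalv1alpha1.ServerStorageVersion{{
				APIServerID:       "kube-apiserver-1",
				EncodingVersion:   "apps/v1",
				DecodableVersions: []string{"apps/v1", "apps/v1beta2"},
				ServedVersions:    []string{"apps/v1"},
			}},
			// Set only when every instance agrees on the encoding version.
			CommonEncodingVersion: &common,
		},
	}
	fmt.Printf("%s encodes to %s\n", sv.Name, *sv.Status.CommonEncodingVersion)
}
```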
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "revision": { + "description": "Revision indicates the revision of the state represented by Data.", + "format": "int64", + "type": "integer" + } + }, + "required": [ + "revision" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "apps", + "kind": "ControllerRevision", + "version": "v1" + } + ] + }, + "io.k8s.api.apps.v1.ControllerRevisionList": { + "description": "ControllerRevisionList is a resource containing a list of ControllerRevision objects.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is the list of ControllerRevisions", + "items": { + "$ref": "#/definitions/io.k8s.api.apps.v1.ControllerRevision" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "apps", + "kind": "ControllerRevisionList", + "version": "v1" + } + ] + }, + "io.k8s.api.apps.v1.DaemonSet": { + "description": "DaemonSet represents the configuration of a daemon set.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.apps.v1.DaemonSetSpec", + "description": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + }, + "status": { + "$ref": "#/definitions/io.k8s.api.apps.v1.DaemonSetStatus", + "description": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. 
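Since a ControllerRevision's data is an opaque `runtime.RawExtension`, a small sketch makes the shape concrete. This is illustrative only; in practice the StatefulSet and DaemonSet controllers create revisions, and the name and payload below are invented:

```go
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	rev := appsv1.ControllerRevision{
		ObjectMeta: metav1.ObjectMeta{Name: "web-65c4f8d7b", Namespace: "default"},
		Revision:   3, // the only required field
		Data: runtime.RawExtension{
			// Opaque serialized state; the API server rejects any update to Data.
			Raw: []byte(`{"spec":{"template":{"metadata":{"labels":{"app":"web"}}}}}`),
		},
	}
	fmt.Printf("%s revision=%d bytes=%d\n", rev.Name, rev.Revision, len(rev.Data.Raw))
}
```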
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "apps", + "kind": "DaemonSet", + "version": "v1" + } + ] + }, + "io.k8s.api.apps.v1.DaemonSetCondition": { + "description": "DaemonSetCondition describes the state of a DaemonSet at a certain point.", + "properties": { + "lastTransitionTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "Last time the condition transitioned from one status to another." + }, + "message": { + "description": "A human readable message indicating details about the transition.", + "type": "string" + }, + "reason": { + "description": "The reason for the condition's last transition.", + "type": "string" + }, + "status": { + "description": "Status of the condition, one of True, False, Unknown.", + "type": "string" + }, + "type": { + "description": "Type of DaemonSet condition.", + "type": "string" + } + }, + "required": [ + "type", + "status" + ], + "type": "object" + }, + "io.k8s.api.apps.v1.DaemonSetList": { + "description": "DaemonSetList is a collection of daemon sets.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "A list of daemon sets.", + "items": { + "$ref": "#/definitions/io.k8s.api.apps.v1.DaemonSet" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "apps", + "kind": "DaemonSetList", + "version": "v1" + } + ] + }, + "io.k8s.api.apps.v1.DaemonSetSpec": { + "description": "DaemonSetSpec is the specification of a daemon set.", + "properties": { + "minReadySeconds": { + "description": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its containers crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).", + "format": "int32", + "type": "integer" + }, + "revisionHistoryLimit": { + "description": "The number of old history entries to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", + "format": "int32", + "type": "integer" + }, + "selector": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", + "description": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. It must match the pod template's labels.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors" + }, + "template": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodTemplateSpec", + "description": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). The only allowed template.spec.restartPolicy value is \"Always\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template" + }, + "updateStrategy": { + "$ref": "#/definitions/io.k8s.api.apps.v1.DaemonSetUpdateStrategy", + "description": "An update strategy to replace existing DaemonSet pods with new pods." + } + }, + "required": [ + "selector", + "template" + ], + "type": "object" + }, + "io.k8s.api.apps.v1.DaemonSetStatus": { + "description": "DaemonSetStatus represents the current status of a daemon set.", + "properties": { + "collisionCount": { + "description": "Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + "format": "int32", + "type": "integer" + }, + "conditions": { + "description": "Represents the latest available observations of a DaemonSet's current state.", + "items": { + "$ref": "#/definitions/io.k8s.api.apps.v1.DaemonSetCondition" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "currentNumberScheduled": { + "description": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", + "format": "int32", + "type": "integer" + }, + "desiredNumberScheduled": { + "description": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", + "format": "int32", + "type": "integer" + }, + "numberAvailable": { + "description": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)", + "format": "int32", + "type": "integer" + }, + "numberMisscheduled": { + "description": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", + "format": "int32", + "type": "integer" + }, + "numberReady": { + "description": "numberReady is the number of nodes that should be running the daemon pod and have one or more of the daemon pod running with a Ready Condition.", + "format": "int32", + "type": "integer" + }, + "numberUnavailable": { + "description": "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)", + "format": "int32", + "type": "integer" + }, + "observedGeneration": { + "description": "The most recent generation observed by the daemon set controller.", + "format": "int64", + "type": "integer" + }, + "updatedNumberScheduled": { + "description": "The total number of nodes that are running updated daemon pods", + "format": "int32", + "type": "integer" + } + }, + "required": [ + "currentNumberScheduled", + "numberMisscheduled", + "desiredNumberScheduled", + "numberReady" + ], + "type": "object" + }, + "io.k8s.api.apps.v1.DaemonSetUpdateStrategy": { + "description": "DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet.", + "properties": { + "rollingUpdate": { + "$ref": "#/definitions/io.k8s.api.apps.v1.RollingUpdateDaemonSet", + "description": "Rolling update config params. Present only if type = \"RollingUpdate\"." + }, + "type": { + "description": "Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". Default is RollingUpdate.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.apps.v1.Deployment": { + "description": "Deployment enables declarative updates for Pods and ReplicaSets.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.apps.v1.DeploymentSpec", + "description": "Specification of the desired behavior of the Deployment." + }, + "status": { + "$ref": "#/definitions/io.k8s.api.apps.v1.DeploymentStatus", + "description": "Most recently observed status of the Deployment." + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "apps", + "kind": "Deployment", + "version": "v1" + } + ] + }, + "io.k8s.api.apps.v1.DeploymentCondition": { + "description": "DeploymentCondition describes the state of a deployment at a certain point.", + "properties": { + "lastTransitionTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "Last time the condition transitioned from one status to another."
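A minimal DaemonSet that satisfies the spec definitions above (the required selector matching the template's labels, plus a rolling-update strategy of the kind described further down), sketched with the `k8s.io/api/apps/v1` types; the names and image are placeholders:

```go
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	labels := map[string]string{"app": "node-agent"}
	maxUnavailable := intstr.FromString("10%")

	ds := appsv1.DaemonSet{
		ObjectMeta: metav1.ObjectMeta{Name: "node-agent", Namespace: "kube-system"},
		Spec: appsv1.DaemonSetSpec{
			// selector and template are the two required fields, and the
			// selector must match the template's labels.
			Selector: &metav1.LabelSelector{MatchLabels: labels},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: labels},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{Name: "agent", Image: "example.com/agent:1.0"}},
				},
			},
			UpdateStrategy: appsv1.DaemonSetUpdateStrategy{
				Type:          appsv1.RollingUpdateDaemonSetStrategyType,
				RollingUpdate: &appsv1.RollingUpdateDaemonSet{MaxUnavailable: &maxUnavailable},
			},
		},
	}
	fmt.Printf("%s/%s strategy=%s\n", ds.Namespace, ds.Name, ds.Spec.UpdateStrategy.Type)
}
```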
+ }, + "lastUpdateTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "The last time this condition was updated." + }, + "message": { + "description": "A human readable message indicating details about the transition.", + "type": "string" + }, + "reason": { + "description": "The reason for the condition's last transition.", + "type": "string" + }, + "status": { + "description": "Status of the condition, one of True, False, Unknown.", + "type": "string" + }, + "type": { + "description": "Type of deployment condition.", + "type": "string" + } + }, + "required": [ + "type", + "status" + ], + "type": "object" + }, + "io.k8s.api.apps.v1.DeploymentList": { + "description": "DeploymentList is a list of Deployments.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is the list of Deployments.", + "items": { + "$ref": "#/definitions/io.k8s.api.apps.v1.Deployment" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata." + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "apps", + "kind": "DeploymentList", + "version": "v1" + } + ] + }, + "io.k8s.api.apps.v1.DeploymentSpec": { + "description": "DeploymentSpec is the specification of the desired behavior of the Deployment.", + "properties": { + "minReadySeconds": { + "description": "Minimum number of seconds for which a newly created pod should be ready without any of its containers crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "format": "int32", + "type": "integer" + }, + "paused": { + "description": "Indicates that the deployment is paused.", + "type": "boolean" + }, + "progressDeadlineSeconds": { + "description": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.", + "format": "int32", + "type": "integer" + }, + "replicas": { + "description": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", + "format": "int32", + "type": "integer" + }, + "revisionHistoryLimit": { + "description": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified.
Defaults to 10.", + "format": "int32", + "type": "integer" + }, + "selector": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", + "description": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment. It must match the pod template's labels." + }, + "strategy": { + "$ref": "#/definitions/io.k8s.api.apps.v1.DeploymentStrategy", + "description": "The deployment strategy to use to replace existing pods with new ones.", + "x-kubernetes-patch-strategy": "retainKeys" + }, + "template": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodTemplateSpec", + "description": "Template describes the pods that will be created. The only allowed template.spec.restartPolicy value is \"Always\"." + } + }, + "required": [ + "selector", + "template" + ], + "type": "object" + }, + "io.k8s.api.apps.v1.DeploymentStatus": { + "description": "DeploymentStatus is the most recently observed status of the Deployment.", + "properties": { + "availableReplicas": { + "description": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", + "format": "int32", + "type": "integer" + }, + "collisionCount": { + "description": "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.", + "format": "int32", + "type": "integer" + }, + "conditions": { + "description": "Represents the latest available observations of a deployment's current state.", + "items": { + "$ref": "#/definitions/io.k8s.api.apps.v1.DeploymentCondition" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "observedGeneration": { + "description": "The generation observed by the deployment controller.", + "format": "int64", + "type": "integer" + }, + "readyReplicas": { + "description": "readyReplicas is the number of pods targeted by this Deployment with a Ready Condition.", + "format": "int32", + "type": "integer" + }, + "replicas": { + "description": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", + "format": "int32", + "type": "integer" + }, + "unavailableReplicas": { + "description": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", + "format": "int32", + "type": "integer" + }, + "updatedReplicas": { + "description": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "io.k8s.api.apps.v1.DeploymentStrategy": { + "description": "DeploymentStrategy describes how to replace existing pods with new ones.", + "properties": { + "rollingUpdate": { + "$ref": "#/definitions/io.k8s.api.apps.v1.RollingUpdateDeployment", + "description": "Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate." + }, + "type": { + "description": "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". 
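The Deployment spec and strategy definitions above combine as in this sketch; the 25% surge/unavailable values mirror the documented defaults, and the app name and image are placeholders:

```go
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	labels := map[string]string{"app": "web"}
	replicas := int32(4) // pointer distinguishes "unset" (defaults to 1) from explicit 0
	surge := intstr.FromString("25%")
	unavailable := intstr.FromString("25%")

	deploy := appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"},
		Spec: appsv1.DeploymentSpec{
			Replicas: &replicas,
			Selector: &metav1.LabelSelector{MatchLabels: labels}, // must match template labels
			Strategy: appsv1.DeploymentStrategy{
				Type: appsv1.RollingUpdateDeploymentStrategyType,
				RollingUpdate: &appsv1.RollingUpdateDeployment{
					MaxSurge:       &surge,
					MaxUnavailable: &unavailable,
				},
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: labels},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{Name: "web", Image: "example.com/web:1.2"}},
				},
			},
		},
	}
	fmt.Printf("%s: %d replicas, %s strategy\n", deploy.Name, *deploy.Spec.Replicas, deploy.Spec.Strategy.Type)
}
```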
Default is RollingUpdate.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.apps.v1.ReplicaSet": { + "description": "ReplicaSet ensures that a specified number of pod replicas are running at any given time.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.apps.v1.ReplicaSetSpec", + "description": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + }, + "status": { + "$ref": "#/definitions/io.k8s.api.apps.v1.ReplicaSetStatus", + "description": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "apps", + "kind": "ReplicaSet", + "version": "v1" + } + ] + }, + "io.k8s.api.apps.v1.ReplicaSetCondition": { + "description": "ReplicaSetCondition describes the state of a replica set at a certain point.", + "properties": { + "lastTransitionTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "The last time the condition transitioned from one status to another." + }, + "message": { + "description": "A human readable message indicating details about the transition.", + "type": "string" + }, + "reason": { + "description": "The reason for the condition's last transition.", + "type": "string" + }, + "status": { + "description": "Status of the condition, one of True, False, Unknown.", + "type": "string" + }, + "type": { + "description": "Type of replica set condition.", + "type": "string" + } + }, + "required": [ + "type", + "status" + ], + "type": "object" + }, + "io.k8s.api.apps.v1.ReplicaSetList": { + "description": "ReplicaSetList is a collection of ReplicaSets.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of ReplicaSets. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", + "items": { + "$ref": "#/definitions/io.k8s.api.apps.v1.ReplicaSet" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "apps", + "kind": "ReplicaSetList", + "version": "v1" + } + ] + }, + "io.k8s.api.apps.v1.ReplicaSetSpec": { + "description": "ReplicaSetSpec is the specification of a ReplicaSet.", + "properties": { + "minReadySeconds": { + "description": "Minimum number of seconds for which a newly created pod should be ready without any of its containers crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "format": "int32", + "type": "integer" + }, + "replicas": { + "description": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", + "format": "int32", + "type": "integer" + }, + "selector": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", + "description": "Selector is a label query over pods that should match the replica count. Label keys and values that must match in order to be controlled by this replica set. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors" + }, + "template": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodTemplateSpec", + "description": "Template is the object that describes the pod that will be created if insufficient replicas are detected.
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template" + } + }, + "required": [ + "selector" + ], + "type": "object" + }, + "io.k8s.api.apps.v1.ReplicaSetStatus": { + "description": "ReplicaSetStatus represents the current status of a ReplicaSet.", + "properties": { + "availableReplicas": { + "description": "The number of available replicas (ready for at least minReadySeconds) for this replica set.", + "format": "int32", + "type": "integer" + }, + "conditions": { + "description": "Represents the latest available observations of a replica set's current state.", + "items": { + "$ref": "#/definitions/io.k8s.api.apps.v1.ReplicaSetCondition" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "fullyLabeledReplicas": { + "description": "The number of pods that have labels matching the labels of the pod template of the replicaset.", + "format": "int32", + "type": "integer" + }, + "observedGeneration": { + "description": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.", + "format": "int64", + "type": "integer" + }, + "readyReplicas": { + "description": "readyReplicas is the number of pods targeted by this ReplicaSet with a Ready Condition.", + "format": "int32", + "type": "integer" + }, + "replicas": { + "description": "Replicas is the most recently observed number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", + "format": "int32", + "type": "integer" + } + }, + "required": [ + "replicas" + ], + "type": "object" + }, + "io.k8s.api.apps.v1.RollingUpdateDaemonSet": { + "description": "Spec to control the desired behavior of daemon set rolling update.", + "properties": { + "maxSurge": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString", + "description": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted. If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediately created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption." + }, + "maxUnavailable": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString", + "description": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up.
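ReplicaSets are normally created and scaled by Deployments rather than by hand, so the status fields above are mostly read, not written. A hypothetical helper (the name and the exact convergence criteria are this sketch's own) showing how the counters and observedGeneration relate:

```go
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
)

// replicaSetSettled reports whether a ReplicaSet has converged: the controller
// has observed the latest generation of the spec, and the ready and available
// counters have caught up with the desired replica count.
func replicaSetSettled(rs *appsv1.ReplicaSet) bool {
	if rs.Spec.Replicas == nil {
		return false // server defaults nil to 1; treat as not yet settled in this sketch
	}
	want := *rs.Spec.Replicas
	return rs.Status.ObservedGeneration >= rs.Generation &&
		rs.Status.Replicas == want &&
		rs.Status.ReadyReplicas == want &&
		rs.Status.AvailableReplicas == want
}

func main() {
	rs := appsv1.ReplicaSet{}
	fmt.Println(replicaSetSettled(&rs)) // false: nothing observed yet
}
```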
This cannot be 0 if MaxSurge is 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update." + } + }, + "type": "object" + }, + "io.k8s.api.apps.v1.RollingUpdateDeployment": { + "description": "Spec to control the desired behavior of rolling update.", + "properties": { + "maxSurge": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString", + "description": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods." + }, + "maxUnavailable": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString", + "description": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old ReplicaSet can be scaled down further, followed by scaling up the new ReplicaSet, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods." + } + }, + "type": "object" + }, + "io.k8s.api.apps.v1.RollingUpdateStatefulSetStrategy": { + "description": "RollingUpdateStatefulSetStrategy is used to communicate parameters for RollingUpdateStatefulSetStrategyType.", + "properties": { + "maxUnavailable": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString", + "description": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding up. This can not be 0. Defaults to 1. This field is alpha-level and is only honored by servers that enable the MaxUnavailableStatefulSet feature. The field applies to all pods in the range 0 to Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it will be counted towards MaxUnavailable." + }, + "partition": { + "description": "Partition indicates the ordinal at which the StatefulSet should be partitioned for updates. During a rolling update, all pods from ordinal Replicas-1 to Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched. This is helpful in being able to do a canary-based deployment.
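The rounding rules in the two descriptions above (maxSurge rounds up, Deployment maxUnavailable rounds down) are easy to check with the IntOrString helper from `k8s.io/apimachinery/pkg/util/intstr`, here assuming 10 desired replicas and the default 25%/25%:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	desired := 10 // desired replicas of a Deployment
	surge := intstr.FromString("25%")
	unavailable := intstr.FromString("25%")

	// Per the descriptions above: maxSurge rounds up, maxUnavailable rounds down.
	up, _ := intstr.GetScaledValueFromIntOrPercent(&surge, desired, true)          // ceil(2.5) = 3
	down, _ := intstr.GetScaledValueFromIntOrPercent(&unavailable, desired, false) // floor(2.5) = 2

	// During the rollout the controller keeps the pod count within these bounds.
	fmt.Printf("between %d and %d pods during the update\n", desired-down, desired+up) // 8..13
}
```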
The default value is 0.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "io.k8s.api.apps.v1.StatefulSet": { + "description": "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\n\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.apps.v1.StatefulSetSpec", + "description": "Spec defines the desired identities of pods in this set." + }, + "status": { + "$ref": "#/definitions/io.k8s.api.apps.v1.StatefulSetStatus", + "description": "Status is the current status of Pods in this StatefulSet. This data may be out of date by some window of time." + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "apps", + "kind": "StatefulSet", + "version": "v1" + } + ] + }, + "io.k8s.api.apps.v1.StatefulSetCondition": { + "description": "StatefulSetCondition describes the state of a statefulset at a certain point.", + "properties": { + "lastTransitionTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "Last time the condition transitioned from one status to another." + }, + "message": { + "description": "A human readable message indicating details about the transition.", + "type": "string" + }, + "reason": { + "description": "The reason for the condition's last transition.", + "type": "string" + }, + "status": { + "description": "Status of the condition, one of True, False, Unknown.", + "type": "string" + }, + "type": { + "description": "Type of statefulset condition.", + "type": "string" + } + }, + "required": [ + "type", + "status" + ], + "type": "object" + }, + "io.k8s.api.apps.v1.StatefulSetList": { + "description": "StatefulSetList is a collection of StatefulSets.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is the list of stateful sets.", + "items": { + "$ref": "#/definitions/io.k8s.api.apps.v1.StatefulSet" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "apps", + "kind": "StatefulSetList", + "version": "v1" + } + ] + }, + "io.k8s.api.apps.v1.StatefulSetOrdinals": { + "description": "StatefulSetOrdinals describes the policy used for replica ordinal assignment in this StatefulSet.", + "properties": { + "start": { + "description": "start is the number representing the first replica's index. It may be used to number replicas from an alternate index (eg: 1-indexed) over the default 0-indexed names, or to orchestrate progressive movement of replicas from one StatefulSet to another. If set, replica indices will be in the range:\n [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas).\nIf unset, defaults to 0. Replica indices will be in the range:\n [0, .spec.replicas).", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "io.k8s.api.apps.v1.StatefulSetPersistentVolumeClaimRetentionPolicy": { + "description": "StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates.", + "properties": { + "whenDeleted": { + "description": "WhenDeleted specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is deleted. The default policy of `Retain` causes PVCs to not be affected by StatefulSet deletion. The `Delete` policy causes those PVCs to be deleted.", + "type": "string" + }, + "whenScaled": { + "description": "WhenScaled specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is scaled down. The default policy of `Retain` causes PVCs to not be affected by a scaledown. The `Delete` policy causes the associated PVCs for any excess pods above the replica count to be deleted.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.apps.v1.StatefulSetSpec": { + "description": "A StatefulSetSpec is the specification of a StatefulSet.", + "properties": { + "minReadySeconds": { + "description": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "format": "int32", + "type": "integer" + }, + "ordinals": { + "$ref": "#/definitions/io.k8s.api.apps.v1.StatefulSetOrdinals", + "description": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is beta." + }, + "persistentVolumeClaimRetentionPolicy": { + "$ref": "#/definitions/io.k8s.api.apps.v1.StatefulSetPersistentVolumeClaimRetentionPolicy", + "description": "persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent volume claims created from volumeClaimTemplates. 
By default, all persistent volume claims are created as needed and retained until manually deleted. This policy allows the lifecycle to be altered, for example by deleting persistent volume claims when their stateful set is deleted, or when their pod is scaled down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha." + }, + "podManagementPolicy": { + "description": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", + "type": "string" + }, + "replicas": { + "description": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", + "format": "int32", + "type": "integer" + }, + "revisionHistoryLimit": { + "description": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", + "format": "int32", + "type": "integer" + }, + "selector": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", + "description": "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors" + }, + "serviceName": { + "description": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", + "type": "string" + }, + "template": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodTemplateSpec", + "description": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet. Each pod will be named with the format \u003cstatefulsetname\u003e-\u003cpodindex\u003e. For example, a pod in a StatefulSet named \"web\" with index number \"3\" would be named \"web-3\". The only allowed template.spec.restartPolicy value is \"Always\"." + }, + "updateStrategy": { + "$ref": "#/definitions/io.k8s.api.apps.v1.StatefulSetUpdateStrategy", + "description": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template." + }, + "volumeClaimTemplates": { + "description": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeClaim" + }, + "type": "array" + } + }, + "required": [ + "selector", + "template", + "serviceName" + ], + "type": "object" + }, + "io.k8s.api.apps.v1.StatefulSetStatus": { + "description": "StatefulSetStatus represents the current state of a StatefulSet.", + "properties": { + "availableReplicas": { + "description": "Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset.", + "format": "int32", + "type": "integer" + }, + "collisionCount": { + "description": "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + "format": "int32", + "type": "integer" + }, + "conditions": { + "description": "Represents the latest available observations of a statefulset's current state.", + "items": { + "$ref": "#/definitions/io.k8s.api.apps.v1.StatefulSetCondition" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "currentReplicas": { + "description": "currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision.", + "format": "int32", + "type": "integer" + }, + "currentRevision": { + "description": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).", + "type": "string" + }, + "observedGeneration": { + "description": "observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server.", + "format": "int64", + "type": "integer" + }, + "readyReplicas": { + "description": "readyReplicas is the number of pods created for this StatefulSet with a Ready Condition.", + "format": "int32", + "type": "integer" + }, + "replicas": { + "description": "replicas is the number of Pods created by the StatefulSet controller.", + "format": "int32", + "type": "integer" + }, + "updateRevision": { + "description": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", + "type": "string" + }, + "updatedReplicas": { + "description": "updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision.", + "format": "int32", + "type": "integer" + } + }, + "required": [ + "replicas" + ], + "type": "object" + }, + "io.k8s.api.apps.v1.StatefulSetUpdateStrategy": { + "description": "StatefulSetUpdateStrategy indicates the strategy that the StatefulSet controller will use to perform updates. It includes any additional parameters necessary to perform the update for the indicated strategy.", + "properties": { + "rollingUpdate": { + "$ref": "#/definitions/io.k8s.api.apps.v1.RollingUpdateStatefulSetStrategy", + "description": "RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType." + }, + "type": { + "description": "Type indicates the type of the StatefulSetUpdateStrategy. 
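Pulling the StatefulSetSpec fields above together: a sketch showing the required `selector`/`template`/`serviceName` trio plus a volume claim template and the PVC retention policy. All names are hypothetical, and `persistentVolumeClaimRetentionPolicy` only takes effect where the StatefulSetAutoDeletePVC feature gate is enabled:

```yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: cache
spec:
  replicas: 3
  podManagementPolicy: Parallel     # create/delete pods without ordinal ordering
  serviceName: cache-headless       # required: governing headless Service
  selector:
    matchLabels:
      app: cache
  persistentVolumeClaimRetentionPolicy:
    whenDeleted: Delete             # remove PVCs when the StatefulSet is deleted
    whenScaled: Retain              # keep PVCs for scaled-down replicas
  template:
    metadata:
      labels:
        app: cache
    spec:
      containers:
        - name: cache
          image: redis:7            # illustrative
          volumeMounts:
            - name: data            # must match a claim template name below
              mountPath: /data
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        accessModes: ["ReadWriteOnce"]
        resources:
          requests:
            storage: 1Gi
```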
Default is RollingUpdate.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.authentication.v1.BoundObjectReference": { + "description": "BoundObjectReference is a reference to an object that a token is bound to.", + "properties": { + "apiVersion": { + "description": "API version of the referent.", + "type": "string" + }, + "kind": { + "description": "Kind of the referent. Valid kinds are 'Pod' and 'Secret'.", + "type": "string" + }, + "name": { + "description": "Name of the referent.", + "type": "string" + }, + "uid": { + "description": "UID of the referent.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.authentication.v1.SelfSubjectReview": { + "description": "SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. When using impersonation, users will receive the user info of the user being impersonated. If impersonation or request header authentication is used, any extra keys will have their case ignored and returned as lowercase.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "status": { + "$ref": "#/definitions/io.k8s.api.authentication.v1.SelfSubjectReviewStatus", + "description": "Status is filled in by the server with the user attributes." + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "authentication.k8s.io", + "kind": "SelfSubjectReview", + "version": "v1" + } + ] + }, + "io.k8s.api.authentication.v1.SelfSubjectReviewStatus": { + "description": "SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user.", + "properties": { + "userInfo": { + "$ref": "#/definitions/io.k8s.api.authentication.v1.UserInfo", + "description": "User attributes of the user making this request." + } + }, + "type": "object" + }, + "io.k8s.api.authentication.v1.TokenRequest": { + "description": "TokenRequest requests a token for a given service account.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.authentication.v1.TokenRequestSpec", + "description": "Spec holds information about the request being evaluated" + }, + "status": { + "$ref": "#/definitions/io.k8s.api.authentication.v1.TokenRequestStatus", + "description": "Status is filled in by the server and indicates whether the token can be authenticated." + } + }, + "required": [ + "spec" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "authentication.k8s.io", + "kind": "TokenRequest", + "version": "v1" + } + ] + }, + "io.k8s.api.authentication.v1.TokenRequestSpec": { + "description": "TokenRequestSpec contains client provided parameters of a token request.", + "properties": { + "audiences": { + "description": "Audiences are the intended audiences of the token. A recipient of a token must identify themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences.", + "items": { + "type": "string" + }, + "type": "array" + }, + "boundObjectRef": { + "$ref": "#/definitions/io.k8s.api.authentication.v1.BoundObjectReference", + "description": "BoundObjectRef is a reference to an object that the token will be bound to. The token will only be valid for as long as the bound object exists. NOTE: The API server's TokenReview endpoint will validate the BoundObjectRef, but other audiences may not. Keep ExpirationSeconds small if you want prompt revocation." + }, + "expirationSeconds": { + "description": "ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response.", + "format": "int64", + "type": "integer" + } + }, + "required": [ + "audiences" + ], + "type": "object" + }, + "io.k8s.api.authentication.v1.TokenRequestStatus": { + "description": "TokenRequestStatus is the result of a token request.", + "properties": { + "expirationTimestamp": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "ExpirationTimestamp is the time of expiration of the returned token." + }, + "token": { + "description": "Token is the opaque bearer token.", + "type": "string" + } + }, + "required": [ + "token", + "expirationTimestamp" + ], + "type": "object" + }, + "io.k8s.api.authentication.v1.TokenReview": { + "description": "TokenReview attempts to authenticate a token to a known user. Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values.
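TokenRequest is a subresource rather than a standalone object: a body like the sketch below is POSTed to the `token` subresource of a ServiceAccount. The service account, bound Pod, and duration here are hypothetical; binding via `boundObjectRef` keeps the token valid only while that object exists:

```yaml
# Hypothetical TokenRequest body; the server fills status.token and
# status.expirationTimestamp in the response. Recent kubectl versions expose
# the same operation as `kubectl create token <serviceaccount>`.
apiVersion: authentication.k8s.io/v1
kind: TokenRequest
spec:
  audiences:
    - https://kubernetes.default.svc   # recipient must match one of these
  expirationSeconds: 3600              # issuer may adjust; check status field
  boundObjectRef:
    kind: Pod                          # valid kinds are 'Pod' and 'Secret'
    name: my-pod                       # hypothetical Pod; token dies with it
```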
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.authentication.v1.TokenReviewSpec", + "description": "Spec holds information about the request being evaluated" + }, + "status": { + "$ref": "#/definitions/io.k8s.api.authentication.v1.TokenReviewStatus", + "description": "Status is filled in by the server and indicates whether the request can be authenticated." + } + }, + "required": [ + "spec" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "authentication.k8s.io", + "kind": "TokenReview", + "version": "v1" + } + ] + }, + "io.k8s.api.authentication.v1.TokenReviewSpec": { + "description": "TokenReviewSpec is a description of the token authentication request.", + "properties": { + "audiences": { + "description": "Audiences is a list of the identifiers that the resource server presented with the token identifies as. Audience-aware token authenticators will verify that the token was intended for at least one of the audiences in this list. If no audiences are provided, the audience will default to the audience of the Kubernetes apiserver.", + "items": { + "type": "string" + }, + "type": "array" + }, + "token": { + "description": "Token is the opaque bearer token.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.authentication.v1.TokenReviewStatus": { + "description": "TokenReviewStatus is the result of the token authentication request.", + "properties": { + "audiences": { + "description": "Audiences are audience identifiers chosen by the authenticator that are compatible with both the TokenReview and token. An identifier is any identifier in the intersection of the TokenReviewSpec audiences and the token's audiences. A client of the TokenReview API that sets the spec.audiences field should validate that a compatible audience identifier is returned in the status.audiences field to ensure that the TokenReview server is audience aware. If a TokenReview returns an empty status.audience field where status.authenticated is \"true\", the token is valid against the audience of the Kubernetes API server.", + "items": { + "type": "string" + }, + "type": "array" + }, + "authenticated": { + "description": "Authenticated indicates that the token was associated with a known user.", + "type": "boolean" + }, + "error": { + "description": "Error indicates that the token couldn't be checked", + "type": "string" + }, + "user": { + "$ref": "#/definitions/io.k8s.api.authentication.v1.UserInfo", + "description": "User is the UserInfo associated with the provided token." 
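A TokenReview submission is similarly create-only. A minimal sketch with a placeholder token; the server sets `status.authenticated`, `status.user`, and, for audience-aware authenticators, `status.audiences`:

```yaml
apiVersion: authentication.k8s.io/v1
kind: TokenReview
spec:
  token: "<opaque-bearer-token>"       # placeholder; never commit real tokens
  audiences:
    - https://kubernetes.default.svc   # optional; defaults to the apiserver audience
```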
+ } + }, + "type": "object" + }, + "io.k8s.api.authentication.v1.UserInfo": { + "description": "UserInfo holds the information about the user needed to implement the user.Info interface.", + "properties": { + "extra": { + "additionalProperties": { + "items": { + "type": "string" + }, + "type": "array" + }, + "description": "Any additional information provided by the authenticator.", + "type": "object" + }, + "groups": { + "description": "The names of groups this user is a part of.", + "items": { + "type": "string" + }, + "type": "array" + }, + "uid": { + "description": "A unique value that identifies this user across time. If this user is deleted and another user by the same name is added, they will have different UIDs.", + "type": "string" + }, + "username": { + "description": "The name that uniquely identifies this user among all active users.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.authentication.v1alpha1.SelfSubjectReview": { + "description": "SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. When using impersonation, users will receive the user info of the user being impersonated. If impersonation or request header authentication is used, any extra keys will have their case ignored and returned as lowercase.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "status": { + "$ref": "#/definitions/io.k8s.api.authentication.v1alpha1.SelfSubjectReviewStatus", + "description": "Status is filled in by the server with the user attributes." + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "authentication.k8s.io", + "kind": "SelfSubjectReview", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.authentication.v1alpha1.SelfSubjectReviewStatus": { + "description": "SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user.", + "properties": { + "userInfo": { + "$ref": "#/definitions/io.k8s.api.authentication.v1.UserInfo", + "description": "User attributes of the user making this request." + } + }, + "type": "object" + }, + "io.k8s.api.authentication.v1beta1.SelfSubjectReview": { + "description": "SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. When using impersonation, users will receive the user info of the user being impersonated. If impersonation or request header authentication is used, any extra keys will have their case ignored and returned as lowercase.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "status": { + "$ref": "#/definitions/io.k8s.api.authentication.v1beta1.SelfSubjectReviewStatus", + "description": "Status is filled in by the server with the user attributes." + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "authentication.k8s.io", + "kind": "SelfSubjectReview", + "version": "v1beta1" + } + ] + }, + "io.k8s.api.authentication.v1beta1.SelfSubjectReviewStatus": { + "description": "SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user.", + "properties": { + "userInfo": { + "$ref": "#/definitions/io.k8s.api.authentication.v1.UserInfo", + "description": "User attributes of the user making this request." + } + }, + "type": "object" + }, + "io.k8s.api.authorization.v1.LocalSubjectAccessReview": { + "description": "LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.authorization.v1.SubjectAccessReviewSpec", + "description": "Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace you made the request against. If empty, it is defaulted." 
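SelfSubjectReview, in all three versions listed above, carries no spec at all: you create an empty object and read back `status.userInfo` (this is what `kubectl auth whoami` does on recent clusters). A minimal sketch:

```yaml
# Create with: kubectl create -f selfsubjectreview.yaml -o yaml
# The response's status.userInfo reports username, uid, groups, and extra.
apiVersion: authentication.k8s.io/v1
kind: SelfSubjectReview
```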
+ }, + "status": { + "$ref": "#/definitions/io.k8s.api.authorization.v1.SubjectAccessReviewStatus", + "description": "Status is filled in by the server and indicates whether the request is allowed or not" + } + }, + "required": [ + "spec" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "authorization.k8s.io", + "kind": "LocalSubjectAccessReview", + "version": "v1" + } + ] + }, + "io.k8s.api.authorization.v1.NonResourceAttributes": { + "description": "NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface", + "properties": { + "path": { + "description": "Path is the URL path of the request", + "type": "string" + }, + "verb": { + "description": "Verb is the standard HTTP verb", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.authorization.v1.NonResourceRule": { + "description": "NonResourceRule holds information that describes a rule for the non-resource", + "properties": { + "nonResourceURLs": { + "description": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path. \"*\" means all.", + "items": { + "type": "string" + }, + "type": "array" + }, + "verbs": { + "description": "Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. \"*\" means all.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "verbs" + ], + "type": "object" + }, + "io.k8s.api.authorization.v1.ResourceAttributes": { + "description": "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface", + "properties": { + "group": { + "description": "Group is the API Group of the Resource. \"*\" means all.", + "type": "string" + }, + "name": { + "description": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.", + "type": "string" + }, + "namespace": { + "description": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview", + "type": "string" + }, + "resource": { + "description": "Resource is one of the existing resource types. \"*\" means all.", + "type": "string" + }, + "subresource": { + "description": "Subresource is one of the existing resource types. \"\" means none.", + "type": "string" + }, + "verb": { + "description": "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", + "type": "string" + }, + "version": { + "description": "Version is the API Version of the Resource. \"*\" means all.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.authorization.v1.ResourceRule": { + "description": "ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.", + "properties": { + "apiGroups": { + "description": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. 
\"*\" means all.", + "items": { + "type": "string" + }, + "type": "array" + }, + "resourceNames": { + "description": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. \"*\" means all.", + "items": { + "type": "string" + }, + "type": "array" + }, + "resources": { + "description": "Resources is a list of resources this rule applies to. \"*\" means all in the specified apiGroups.\n \"*/foo\" represents the subresource 'foo' for all resources in the specified apiGroups.", + "items": { + "type": "string" + }, + "type": "array" + }, + "verbs": { + "description": "Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "verbs" + ], + "type": "object" + }, + "io.k8s.api.authorization.v1.SelfSubjectAccessReview": { + "description": "SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a spec.namespace means \"in all namespaces\". Self is a special case, because users should always be able to check whether they can perform an action", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.authorization.v1.SelfSubjectAccessReviewSpec", + "description": "Spec holds information about the request being evaluated. user and groups must be empty" + }, + "status": { + "$ref": "#/definitions/io.k8s.api.authorization.v1.SubjectAccessReviewStatus", + "description": "Status is filled in by the server and indicates whether the request is allowed or not" + } + }, + "required": [ + "spec" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "authorization.k8s.io", + "kind": "SelfSubjectAccessReview", + "version": "v1" + } + ] + }, + "io.k8s.api.authorization.v1.SelfSubjectAccessReviewSpec": { + "description": "SelfSubjectAccessReviewSpec is a description of the access request. 
Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set", + "properties": { + "nonResourceAttributes": { + "$ref": "#/definitions/io.k8s.api.authorization.v1.NonResourceAttributes", + "description": "NonResourceAttributes describes information for a non-resource access request" + }, + "resourceAttributes": { + "$ref": "#/definitions/io.k8s.api.authorization.v1.ResourceAttributes", + "description": "ResourceAuthorizationAttributes describes information for a resource access request" + } + }, + "type": "object" + }, + "io.k8s.api.authorization.v1.SelfSubjectRulesReview": { + "description": "SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. The returned list of actions may be incomplete depending on the server's authorization mode, and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions, or to quickly let an end user reason about their permissions. It should NOT be used by external systems to drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. SubjectAccessReview, and LocalSubjectAccessReview are the correct way to defer authorization decisions to the API server.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.authorization.v1.SelfSubjectRulesReviewSpec", + "description": "Spec holds information about the request being evaluated." + }, + "status": { + "$ref": "#/definitions/io.k8s.api.authorization.v1.SubjectRulesReviewStatus", + "description": "Status is filled in by the server and indicates the set of actions a user can perform." + } + }, + "required": [ + "spec" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "authorization.k8s.io", + "kind": "SelfSubjectRulesReview", + "version": "v1" + } + ] + }, + "io.k8s.api.authorization.v1.SelfSubjectRulesReviewSpec": { + "description": "SelfSubjectRulesReviewSpec defines the specification for SelfSubjectRulesReview.", + "properties": { + "namespace": { + "description": "Namespace to evaluate rules for. Required.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.authorization.v1.SubjectAccessReview": { + "description": "SubjectAccessReview checks whether or not a user or group can perform an action.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values.
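SelfSubjectRulesReview takes only a namespace and returns the (possibly incomplete) rule set; as the description above warns, it is meant for UI hinting, not for authorization decisions. A minimal sketch:

```yaml
apiVersion: authorization.k8s.io/v1
kind: SelfSubjectRulesReview
spec:
  namespace: default          # required: namespace to evaluate rules for
# status.resourceRules / status.nonResourceRules list permitted actions;
# status.incomplete is true when an authorizer cannot enumerate rules.
```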
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.authorization.v1.SubjectAccessReviewSpec", + "description": "Spec holds information about the request being evaluated" + }, + "status": { + "$ref": "#/definitions/io.k8s.api.authorization.v1.SubjectAccessReviewStatus", + "description": "Status is filled in by the server and indicates whether the request is allowed or not" + } + }, + "required": [ + "spec" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "authorization.k8s.io", + "kind": "SubjectAccessReview", + "version": "v1" + } + ] + }, + "io.k8s.api.authorization.v1.SubjectAccessReviewSpec": { + "description": "SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set", + "properties": { + "extra": { + "additionalProperties": { + "items": { + "type": "string" + }, + "type": "array" + }, + "description": "Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here.", + "type": "object" + }, + "groups": { + "description": "Groups is the groups you're testing for.", + "items": { + "type": "string" + }, + "type": "array" + }, + "nonResourceAttributes": { + "$ref": "#/definitions/io.k8s.api.authorization.v1.NonResourceAttributes", + "description": "NonResourceAttributes describes information for a non-resource access request" + }, + "resourceAttributes": { + "$ref": "#/definitions/io.k8s.api.authorization.v1.ResourceAttributes", + "description": "ResourceAuthorizationAttributes describes information for a resource access request" + }, + "uid": { + "description": "UID information about the requesting user.", + "type": "string" + }, + "user": { + "description": "User is the user you're testing for. If you specify \"User\" but not \"Groups\", then it is interpreted as \"What if User were not a member of any groups\"", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.authorization.v1.SubjectAccessReviewStatus": { + "description": "SubjectAccessReviewStatus", + "properties": { + "allowed": { + "description": "Allowed is required. True if the action would be allowed, false otherwise.", + "type": "boolean" + }, + "denied": { + "description": "Denied is optional. True if the action would be denied, otherwise false. If both allowed is false and denied is false, then the authorizer has no opinion on whether to authorize the action. Denied may not be true if Allowed is true.", + "type": "boolean" + }, + "evaluationError": { + "description": "EvaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue to determine authorization status in spite of it.
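SubjectAccessReview lets a component ask about another subject. A sketch with a hypothetical user and group; the status fields described here distinguish allowed, denied, and "no opinion" (both false):

```yaml
apiVersion: authorization.k8s.io/v1
kind: SubjectAccessReview
spec:
  user: jane@example.com      # hypothetical subject being tested
  groups:
    - developers
  resourceAttributes:
    namespace: staging        # hypothetical
    verb: delete
    group: ""                 # core API group
    resource: pods
# status.allowed / status.denied are filled in by the server; if both are
# false, the configured authorizers had no opinion on the request.
```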
For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request.", + "type": "string" + }, + "reason": { + "description": "Reason is optional. It indicates why a request was allowed or denied.", + "type": "string" + } + }, + "required": [ + "allowed" + ], + "type": "object" + }, + "io.k8s.api.authorization.v1.SubjectRulesReviewStatus": { + "description": "SubjectRulesReviewStatus contains the result of a rules check. This check can be incomplete depending on the set of authorizers the server is configured with and any errors experienced during evaluation. Because authorization rules are additive, if a rule appears in a list it's safe to assume the subject has that permission, even if that list is incomplete.", + "properties": { + "evaluationError": { + "description": "EvaluationError can appear in combination with Rules. It indicates an error occurred during rule evaluation, such as an authorizer that doesn't support rule evaluation, and that ResourceRules and/or NonResourceRules may be incomplete.", + "type": "string" + }, + "incomplete": { + "description": "Incomplete is true when the rules returned by this call are incomplete. This is most commonly encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation.", + "type": "boolean" + }, + "nonResourceRules": { + "description": "NonResourceRules is the list of actions the subject is allowed to perform on non-resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.", + "items": { + "$ref": "#/definitions/io.k8s.api.authorization.v1.NonResourceRule" + }, + "type": "array" + }, + "resourceRules": { + "description": "ResourceRules is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.", + "items": { + "$ref": "#/definitions/io.k8s.api.authorization.v1.ResourceRule" + }, + "type": "array" + } + }, + "required": [ + "resourceRules", + "nonResourceRules", + "incomplete" + ], + "type": "object" + }, + "io.k8s.api.autoscaling.v1.CrossVersionObjectReference": { + "description": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", + "properties": { + "apiVersion": { + "description": "apiVersion is the API version of the referent", + "type": "string" + }, + "kind": { + "description": "kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "name": { + "description": "name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": "string" + } + }, + "required": [ + "kind", + "name" + ], + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler": { + "description": "configuration of a horizontal pod autoscaler.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerSpec", + "description": "spec defines the behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status." + }, + "status": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerStatus", + "description": "status is the current information about the autoscaler." + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "autoscaling", + "kind": "HorizontalPodAutoscaler", + "version": "v1" + } + ] + }, + "io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerList": { + "description": "list of horizontal pod autoscaler objects.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of horizontal pod autoscaler objects.", + "items": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata." + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "autoscaling", + "kind": "HorizontalPodAutoscalerList", + "version": "v1" + } + ] + }, + "io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerSpec": { + "description": "specification of a horizontal pod autoscaler.", + "properties": { + "maxReplicas": { + "description": "maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.", + "format": "int32", + "type": "integer" + }, + "minReplicas": { + "description": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.", + "format": "int32", + "type": "integer" + }, + "scaleTargetRef": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v1.CrossVersionObjectReference", + "description": "reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption and will set the desired number of pods by using its Scale subresource." 
+ }, + "targetCPUUtilizationPercentage": { + "description": "targetCPUUtilizationPercentage is the target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used.", + "format": "int32", + "type": "integer" + } + }, + "required": [ + "scaleTargetRef", + "maxReplicas" + ], + "type": "object" + }, + "io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerStatus": { + "description": "current status of a horizontal pod autoscaler", + "properties": { + "currentCPUUtilizationPercentage": { + "description": "currentCPUUtilizationPercentage is the current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 70 means that an average pod is using now 70% of its requested CPU.", + "format": "int32", + "type": "integer" + }, + "currentReplicas": { + "description": "currentReplicas is the current number of replicas of pods managed by this autoscaler.", + "format": "int32", + "type": "integer" + }, + "desiredReplicas": { + "description": "desiredReplicas is the desired number of replicas of pods managed by this autoscaler.", + "format": "int32", + "type": "integer" + }, + "lastScaleTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed." + }, + "observedGeneration": { + "description": "observedGeneration is the most recent generation observed by this autoscaler.", + "format": "int64", + "type": "integer" + } + }, + "required": [ + "currentReplicas", + "desiredReplicas" + ], + "type": "object" + }, + "io.k8s.api.autoscaling.v1.Scale": { + "description": "Scale represents a scaling request for a resource.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata." + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v1.ScaleSpec", + "description": "spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status." + }, + "status": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v1.ScaleStatus", + "description": "status is the current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only." 
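The autoscaling/v1 fields above compose into a small manifest; the target name and utilization figure are hypothetical:

```yaml
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: web-hpa
spec:
  scaleTargetRef:                      # anything exposing a scale subresource
    apiVersion: apps/v1
    kind: Deployment
    name: web                          # hypothetical Deployment
  minReplicas: 2
  maxReplicas: 10                      # must not be smaller than minReplicas
  targetCPUUtilizationPercentage: 70   # average across pods, vs. requested CPU
```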
+ } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "autoscaling", + "kind": "Scale", + "version": "v1" + } + ] + }, + "io.k8s.api.autoscaling.v1.ScaleSpec": { + "description": "ScaleSpec describes the attributes of a scale subresource.", + "properties": { + "replicas": { + "description": "replicas is the desired number of instances for the scaled object.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "io.k8s.api.autoscaling.v1.ScaleStatus": { + "description": "ScaleStatus represents the current status of a scale subresource.", + "properties": { + "replicas": { + "description": "replicas is the actual number of observed instances of the scaled object.", + "format": "int32", + "type": "integer" + }, + "selector": { + "description": "selector is the label query over pods that should match the replicas count. This is same as the label selector but in the string format to avoid introspection by clients. The string will be in the same format as the query-param syntax. More info about label selectors: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/", + "type": "string" + } + }, + "required": [ + "replicas" + ], + "type": "object" + }, + "io.k8s.api.autoscaling.v2.ContainerResourceMetricSource": { + "description": "ContainerResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.", + "properties": { + "container": { + "description": "container is the name of the container in the pods of the scaling target", + "type": "string" + }, + "name": { + "description": "name is the name of the resource in question.", + "type": "string" + }, + "target": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricTarget", + "description": "target specifies the target value for the given metric" + } + }, + "required": [ + "name", + "target", + "container" + ], + "type": "object" + }, + "io.k8s.api.autoscaling.v2.ContainerResourceMetricStatus": { + "description": "ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "properties": { + "container": { + "description": "container is the name of the container in the pods of the scaling target", + "type": "string" + }, + "current": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricValueStatus", + "description": "current contains the current value for the given metric" + }, + "name": { + "description": "name is the name of the resource in question.", + "type": "string" + } + }, + "required": [ + "name", + "current", + "container" + ], + "type": "object" + }, + "io.k8s.api.autoscaling.v2.CrossVersionObjectReference": { + "description": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", + "properties": { + "apiVersion": { + "description": "apiVersion is the API version of the referent", + "type": "string" + }, + "kind": { + "description": "kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "name": { + "description": "name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": "string" + } + }, + "required": [ + "kind", + "name" + ], + "type": "object" + }, + "io.k8s.api.autoscaling.v2.ExternalMetricSource": { + "description": "ExternalMetricSource indicates how to scale on a metric not associated with any Kubernetes object (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", + "properties": { + "metric": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricIdentifier", + "description": "metric identifies the target metric by name and selector" + }, + "target": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricTarget", + "description": "target specifies the target value for the given metric" + } + }, + "required": [ + "metric", + "target" + ], + "type": "object" + }, + "io.k8s.api.autoscaling.v2.ExternalMetricStatus": { + "description": "ExternalMetricStatus indicates the current value of a global metric not associated with any Kubernetes object.", + "properties": { + "current": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricValueStatus", + "description": "current contains the current value for the given metric" + }, + "metric": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricIdentifier", + "description": "metric identifies the target metric by name and selector" + } + }, + "required": [ + "metric", + "current" + ], + "type": "object" + }, + "io.k8s.api.autoscaling.v2.HPAScalingPolicy": { + "description": "HPAScalingPolicy is a single policy which must hold true for a specified past interval.", + "properties": { + "periodSeconds": { + "description": "periodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).", + "format": "int32", + "type": "integer" + }, + "type": { + "description": "type is used to specify the scaling policy.", + "type": "string" + }, + "value": { + "description": "value contains the amount of change which is permitted by the policy. 
It must be greater than zero.", + "format": "int32", + "type": "integer" + } + }, + "required": [ + "type", + "value", + "periodSeconds" + ], + "type": "object" + }, + "io.k8s.api.autoscaling.v2.HPAScalingRules": { + "description": "HPAScalingRules configures the scaling behavior for one direction. These Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly; instead, the safest value from the stabilization window is chosen.", + "properties": { + "policies": { + "description": "policies is a list of potential scaling policies which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid", + "items": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.HPAScalingPolicy" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "selectPolicy": { + "description": "selectPolicy is used to specify which policy should be used. If not set, the default value Max is used.", + "type": "string" + }, + "stabilizationWindowSeconds": { + "description": "stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "io.k8s.api.autoscaling.v2.HorizontalPodAutoscaler": { + "description": "HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "metadata is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerSpec", + "description": "spec is the specification for the behaviour of the autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status." + }, + "status": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerStatus", + "description": "status is the current information about the autoscaler."
+ } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "autoscaling", + "kind": "HorizontalPodAutoscaler", + "version": "v2" + } + ] + }, + "io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerBehavior": { + "description": "HorizontalPodAutoscalerBehavior configures the scaling behavior of the target in both Up and Down directions (scaleUp and scaleDown fields respectively).", + "properties": { + "scaleDown": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.HPAScalingRules", + "description": "scaleDown is the scaling policy for scaling Down. If not set, the default value is to allow scaling down to minReplicas pods, with a 300 second stabilization window (i.e., the highest recommendation for the last 300sec is used)." + }, + "scaleUp": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.HPAScalingRules", + "description": "scaleUp is the scaling policy for scaling Up. If not set, the default value is the higher of:\n * increase no more than 4 pods per 60 seconds\n * double the number of pods per 60 seconds\nNo stabilization is used." + } + }, + "type": "object" + }, + "io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerCondition": { + "description": "HorizontalPodAutoscalerCondition describes the state of a HorizontalPodAutoscaler at a certain point.", + "properties": { + "lastTransitionTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "lastTransitionTime is the last time the condition transitioned from one status to another" + }, + "message": { + "description": "message is a human-readable explanation containing details about the transition", + "type": "string" + }, + "reason": { + "description": "reason is the reason for the condition's last transition.", + "type": "string" + }, + "status": { + "description": "status is the status of the condition (True, False, Unknown)", + "type": "string" + }, + "type": { + "description": "type describes the current condition", + "type": "string" + } + }, + "required": [ + "type", + "status" + ], + "type": "object" + }, + "io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerList": { + "description": "HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of horizontal pod autoscaler objects.", + "items": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.HorizontalPodAutoscaler" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "metadata is the standard list metadata."
+ } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "autoscaling", + "kind": "HorizontalPodAutoscalerList", + "version": "v2" + } + ] + }, + "io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerSpec": { + "description": "HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.", + "properties": { + "behavior": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerBehavior", + "description": "behavior configures the scaling behavior of the target in both Up and Down directions (scaleUp and scaleDown fields respectively). If not set, the default HPAScalingRules for scale up and scale down are used." + }, + "maxReplicas": { + "description": "maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. It cannot be less than minReplicas.", + "format": "int32", + "type": "integer" + }, + "metrics": { + "description": "metrics contains the specifications used to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated by multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. See the individual metric source types for more information about how each type of metric must respond. If not set, the default metric will be set to 80% average CPU utilization.", + "items": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricSpec" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "minReplicas": { + "description": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.", + "format": "int32", + "type": "integer" + }, + "scaleTargetRef": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.CrossVersionObjectReference", + "description": "scaleTargetRef points to the target resource to scale, and is used to identify the pods for which metrics should be collected, as well as to actually change the replica count."
+ } + }, + "required": [ + "scaleTargetRef", + "maxReplicas" + ], + "type": "object" + }, + "io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerStatus": { + "description": "HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler.", + "properties": { + "conditions": { + "description": "conditions is the set of conditions required for this autoscaler to scale its target, and indicates whether or not those conditions are met.", + "items": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerCondition" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "currentMetrics": { + "description": "currentMetrics is the last read state of the metrics used by this autoscaler.", + "items": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricStatus" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "currentReplicas": { + "description": "currentReplicas is the current number of replicas of pods managed by this autoscaler, as last seen by the autoscaler.", + "format": "int32", + "type": "integer" + }, + "desiredReplicas": { + "description": "desiredReplicas is the desired number of replicas of pods managed by this autoscaler, as last calculated by the autoscaler.", + "format": "int32", + "type": "integer" + }, + "lastScaleTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, used by the autoscaler to control how often the number of pods is changed." + }, + "observedGeneration": { + "description": "observedGeneration is the most recent generation observed by this autoscaler.", + "format": "int64", + "type": "integer" + } + }, + "required": [ + "desiredReplicas" + ], + "type": "object" + }, + "io.k8s.api.autoscaling.v2.MetricIdentifier": { + "description": "MetricIdentifier defines the name and optionally a selector for a metric", + "properties": { + "name": { + "description": "name is the name of the given metric", + "type": "string" + }, + "selector": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", + "description": "selector is the string-encoded form of a standard kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics." + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "io.k8s.api.autoscaling.v2.MetricSpec": { + "description": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", + "properties": { + "containerResource": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.ContainerResourceMetricSource", + "description": "containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag."
+ }, + "external": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.ExternalMetricSource", + "description": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster)." + }, + "object": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.ObjectMetricSource", + "description": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object)." + }, + "pods": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.PodsMetricSource", + "description": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value." + }, + "resource": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.ResourceMetricSource", + "description": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source." + }, + "type": { + "description": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object. Note: \"ContainerResource\" type is available only when the feature-gate HPAContainerMetrics is enabled", + "type": "string" + } + }, + "required": [ + "type" + ], + "type": "object" + }, + "io.k8s.api.autoscaling.v2.MetricStatus": { + "description": "MetricStatus describes the last-read state of a single metric.", + "properties": { + "containerResource": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.ContainerResourceMetricStatus", + "description": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source." + }, + "external": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.ExternalMetricStatus", + "description": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster)." + }, + "object": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.ObjectMetricStatus", + "description": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object)." + }, + "pods": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.PodsMetricStatus", + "description": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value."
+ }, + "resource": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.ResourceMetricStatus", + "description": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source." + }, + "type": { + "description": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponding to a matching field in the object. Note: \"ContainerResource\" type is available only when the feature-gate HPAContainerMetrics is enabled", + "type": "string" + } + }, + "required": [ + "type" + ], + "type": "object" + }, + "io.k8s.api.autoscaling.v2.MetricTarget": { + "description": "MetricTarget defines the target value, average value, or average utilization of a specific metric", + "properties": { + "averageUtilization": { + "description": "averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type", + "format": "int32", + "type": "integer" + }, + "averageValue": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity", + "description": "averageValue is the target value of the average of the metric across all relevant pods (as a quantity)" + }, + "type": { + "description": "type represents whether the metric type is Utilization, Value, or AverageValue", + "type": "string" + }, + "value": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity", + "description": "value is the target value of the metric (as a quantity)." + } + }, + "required": [ + "type" + ], + "type": "object" + }, + "io.k8s.api.autoscaling.v2.MetricValueStatus": { + "description": "MetricValueStatus holds the current value for a metric", + "properties": { + "averageUtilization": { + "description": "averageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", + "format": "int32", + "type": "integer" + }, + "averageValue": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity", + "description": "averageValue is the current value of the average of the metric across all relevant pods (as a quantity)" + }, + "value": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity", + "description": "value is the current value of the metric (as a quantity)."
+ } + }, + "type": "object" + }, + "io.k8s.api.autoscaling.v2.ObjectMetricSource": { + "description": "ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).", + "properties": { + "describedObject": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.CrossVersionObjectReference", + "description": "describedObject specifies the description of the referenced object, such as kind, name, and apiVersion" + }, + "metric": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricIdentifier", + "description": "metric identifies the target metric by name and selector" + }, + "target": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricTarget", + "description": "target specifies the target value for the given metric" + } + }, + "required": [ + "describedObject", + "target", + "metric" + ], + "type": "object" + }, + "io.k8s.api.autoscaling.v2.ObjectMetricStatus": { + "description": "ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).", + "properties": { + "current": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricValueStatus", + "description": "current contains the current value for the given metric" + }, + "describedObject": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.CrossVersionObjectReference", + "description": "describedObject specifies the description of the referenced object, such as kind, name, and apiVersion" + }, + "metric": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricIdentifier", + "description": "metric identifies the target metric by name and selector" + } + }, + "required": [ + "metric", + "current", + "describedObject" + ], + "type": "object" + }, + "io.k8s.api.autoscaling.v2.PodsMetricSource": { + "description": "PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "properties": { + "metric": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricIdentifier", + "description": "metric identifies the target metric by name and selector" + }, + "target": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricTarget", + "description": "target specifies the target value for the given metric" + } + }, + "required": [ + "metric", + "target" + ], + "type": "object" + }, + "io.k8s.api.autoscaling.v2.PodsMetricStatus": { + "description": "PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second).", + "properties": { + "current": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricValueStatus", + "description": "current contains the current value for the given metric" + }, + "metric": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricIdentifier", + "description": "metric identifies the target metric by name and selector" + } + }, + "required": [ + "metric", + "current" + ], + "type": "object" + }, + "io.k8s.api.autoscaling.v2.ResourceMetricSource": { + "description": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target.
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.", + "properties": { + "name": { + "description": "name is the name of the resource in question.", + "type": "string" + }, + "target": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricTarget", + "description": "target specifies the target value for the given metric" + } + }, + "required": [ + "name", + "target" + ], + "type": "object" + }, + "io.k8s.api.autoscaling.v2.ResourceMetricStatus": { + "description": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "properties": { + "current": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricValueStatus", + "description": "current contains the current value for the given metric" + }, + "name": { + "description": "name is the name of the resource in question.", + "type": "string" + } + }, + "required": [ + "name", + "current" + ], + "type": "object" + }, + "io.k8s.api.batch.v1.CronJob": { + "description": "CronJob represents the configuration of a single cron job.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.batch.v1.CronJobSpec", + "description": "Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + }, + "status": { + "$ref": "#/definitions/io.k8s.api.batch.v1.CronJobStatus", + "description": "Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "batch", + "kind": "CronJob", + "version": "v1" + } + ] + }, + "io.k8s.api.batch.v1.CronJobList": { + "description": "CronJobList is a collection of cron jobs.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of CronJobs.", + "items": { + "$ref": "#/definitions/io.k8s.api.batch.v1.CronJob" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "batch", + "kind": "CronJobList", + "version": "v1" + } + ] + }, + "io.k8s.api.batch.v1.CronJobSpec": { + "description": "CronJobSpec describes what the job execution will look like and when it will actually run.", + "properties": { + "concurrencyPolicy": { + "description": "Specifies how to treat concurrent executions of a Job. Valid values are:\n\n- \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", + "type": "string" + }, + "failedJobsHistoryLimit": { + "description": "The number of failed finished jobs to retain. Value must be a non-negative integer. Defaults to 1.", + "format": "int32", + "type": "integer" + }, + "jobTemplate": { + "$ref": "#/definitions/io.k8s.api.batch.v1.JobTemplateSpec", + "description": "Specifies the job that will be created when executing a CronJob." + }, + "schedule": { + "description": "The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", + "type": "string" + }, + "startingDeadlineSeconds": { + "description": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed job executions will be counted as failed ones.", + "format": "int64", + "type": "integer" + }, + "successfulJobsHistoryLimit": { + "description": "The number of successful finished jobs to retain. Value must be a non-negative integer. Defaults to 3.", + "format": "int32", + "type": "integer" + }, + "suspend": { + "description": "This flag tells the controller to suspend subsequent executions; it does not apply to already started executions. Defaults to false.", + "type": "boolean" + }, + "timeZone": { + "description": "The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will default to the time zone of the kube-controller-manager process. The set of valid time zone names and the time zone offset is loaded from the system-wide time zone database by the API server during CronJob validation and the controller manager during execution. If no system-wide time zone database can be found, a bundled version of the database is used instead. If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host configuration, the controller will stop creating new Jobs and will create a system event with the reason UnknownTimeZone.
More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones", + "type": "string" + } + }, + "required": [ + "schedule", + "jobTemplate" + ], + "type": "object" + }, + "io.k8s.api.batch.v1.CronJobStatus": { + "description": "CronJobStatus represents the current state of a cron job.", + "properties": { + "active": { + "description": "A list of pointers to currently running jobs.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "lastScheduleTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "Information about when the job was last successfully scheduled." + }, + "lastSuccessfulTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "Information about when the job last successfully completed." + } + }, + "type": "object" + }, + "io.k8s.api.batch.v1.Job": { + "description": "Job represents the configuration of a single job.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.batch.v1.JobSpec", + "description": "Specification of the desired behavior of a job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + }, + "status": { + "$ref": "#/definitions/io.k8s.api.batch.v1.JobStatus", + "description": "Current status of a job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "batch", + "kind": "Job", + "version": "v1" + } + ] + }, + "io.k8s.api.batch.v1.JobCondition": { + "description": "JobCondition describes the current state of a job.", + "properties": { + "lastProbeTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "Last time the condition was checked." + }, + "lastTransitionTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "Last time the condition transitioned from one status to another."
+ }, + "message": { + "description": "Human readable message indicating details about last transition.", + "type": "string" + }, + "reason": { + "description": "(brief) reason for the condition's last transition.", + "type": "string" + }, + "status": { + "description": "Status of the condition, one of True, False, Unknown.", + "type": "string" + }, + "type": { + "description": "Type of job condition, Complete or Failed.", + "type": "string" + } + }, + "required": [ + "type", + "status" + ], + "type": "object" + }, + "io.k8s.api.batch.v1.JobList": { + "description": "JobList is a collection of jobs.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of Jobs.", + "items": { + "$ref": "#/definitions/io.k8s.api.batch.v1.Job" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "batch", + "kind": "JobList", + "version": "v1" + } + ] + }, + "io.k8s.api.batch.v1.JobSpec": { + "description": "JobSpec describes what the job execution will look like.", + "properties": { + "activeDeadlineSeconds": { + "description": "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be a positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.", + "format": "int64", + "type": "integer" + }, + "backoffLimit": { + "description": "Specifies the number of retries before marking this job failed. Defaults to 6.", + "format": "int32", + "type": "integer" + }, + "backoffLimitPerIndex": { + "description": "Specifies the limit for the number of retries within an index before marking this index as failed. When enabled the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", + "format": "int32", + "type": "integer" + }, + "completionMode": { + "description": "completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods.
Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, the Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.", + "type": "string" + }, + "completions": { + "description": "Specifies the desired number of successfully finished pods the job should be run with. Setting to null means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "format": "int32", + "type": "integer" + }, + "manualSelector": { + "description": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system picks labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, you may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector", + "type": "boolean" + }, + "maxFailedIndexes": { + "description": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number, the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when completions is greater than 10^5. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", + "format": "int32", + "type": "integer" + }, + "parallelism": { + "description": "Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) \u003c .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "format": "int32", + "type": "integer" + }, + "podFailurePolicy": { + "$ref": "#/definitions/io.k8s.api.batch.v1.PodFailurePolicy", + "description": "Specifies the policy of handling failed pods.
In particular, it allows specifying the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the job's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.\n\nThis field is beta-level. It can be used when the `JobPodFailurePolicy` feature gate is enabled (enabled by default)." + }, + "podReplacementPolicy": { + "description": "podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\n when they are terminating (have a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n Failed or Succeeded) before creating a replacement Pod.\n\nWhen using podFailurePolicy, Failed is the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is a beta field. To use this, enable the JobPodReplacementPolicy feature toggle. This is on by default.", + "type": "string" + }, + "selector": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", + "description": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors" + }, + "suspend": { + "description": "suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.", + "type": "boolean" + }, + "template": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodTemplateSpec", + "description": "Describes the pod that will be created when executing a job. The only allowed template.spec.restartPolicy values are \"Never\" or \"OnFailure\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/" + }, + "ttlSecondsAfterFinished": { + "description": "ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.", + "format": "int32", + "type": "integer" + } + }, + "required": [ + "template" + ], + "type": "object" + }, + "io.k8s.api.batch.v1.JobStatus": { + "description": "JobStatus represents the current state of a Job.", + "properties": { + "active": { + "description": "The number of pending and running pods.", + "format": "int32", + "type": "integer" + }, + "completedIndexes": { + "description": "completedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. The indexes are represented as decimal integers separated by commas.
The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".", + "type": "string" + }, + "completionTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC. The completion time is only set when the job finishes successfully." + }, + "conditions": { + "description": "The latest available observations of an object's current state. When a Job fails, one of the conditions will have type \"Failed\" and status true. When a Job is suspended, one of the conditions will have type \"Suspended\" and status true; when the Job is resumed, the status of this condition will become false. When a Job is completed, one of the conditions will have type \"Complete\" and status true. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "items": { + "$ref": "#/definitions/io.k8s.api.batch.v1.JobCondition" + }, + "type": "array", + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "failed": { + "description": "The number of pods which reached phase Failed.", + "format": "int32", + "type": "integer" + }, + "failedIndexes": { + "description": "FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. The indexes are represented in a text format analogous to the `completedIndexes` field, i.e. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", + "type": "string" + }, + "ready": { + "description": "The number of pods which have a Ready condition.", + "format": "int32", + "type": "integer" + }, + "startTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "Represents time when the job controller started processing a job. When a Job is created in the suspended state, this field is not set until the first time it is resumed. This field is reset every time a Job is resumed from suspension. It is represented in RFC3339 form and is in UTC." + }, + "succeeded": { + "description": "The number of pods which reached phase Succeeded.", + "format": "int32", + "type": "integer" + }, + "terminating": { + "description": "The number of pods which are terminating (in phase Pending or Running and have a deletionTimestamp).\n\nThis field is beta-level. The job controller populates the field when the feature gate JobPodReplacementPolicy is enabled (enabled by default).", + "format": "int32", + "type": "integer" + }, + "uncountedTerminatedPods": { + "$ref": "#/definitions/io.k8s.api.batch.v1.UncountedTerminatedPods", + "description": "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer.
When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null." + } + }, + "type": "object" + }, + "io.k8s.api.batch.v1.JobTemplateSpec": { + "description": "JobTemplateSpec describes the data a Job should have when created from a template.", + "properties": { + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.batch.v1.JobSpec", + "description": "Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + } + }, + "type": "object" + }, + "io.k8s.api.batch.v1.PodFailurePolicy": { + "description": "PodFailurePolicy describes how failed pods influence the backoffLimit.", + "properties": { + "rules": { + "description": "A list of pod failure policy rules. The rules are evaluated in order. Once a rule matches a Pod failure, the remaining rules are ignored. When no rule matches the Pod failure, the default handling applies - the counter of pod failures is incremented and it is checked against the backoffLimit. At most 20 elements are allowed.", + "items": { + "$ref": "#/definitions/io.k8s.api.batch.v1.PodFailurePolicyRule" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "required": [ + "rules" + ], + "type": "object" + }, + "io.k8s.api.batch.v1.PodFailurePolicyOnExitCodesRequirement": { + "description": "PodFailurePolicyOnExitCodesRequirement describes the requirement for handling a failed pod based on its container exit codes. In particular, it looks up the .state.terminated.exitCode for each app container and init container status, represented by the .status.containerStatuses and .status.initContainerStatuses fields in the Pod status, respectively. Containers completed with success (exit code 0) are excluded from the requirement check.", + "properties": { + "containerName": { + "description": "Restricts the check for exit codes to the container with the specified name. When null, the rule applies to all containers. When specified, it should match one of the container or initContainer names in the pod template.", + "type": "string" + }, + "operator": { + "description": "Represents the relationship between the container exit code(s) and the specified values. Containers completed with success (exit code 0) are excluded from the requirement check. Possible values are:\n\n- In: the requirement is satisfied if at least one container exit code\n (might be multiple if there are multiple containers not restricted\n by the 'containerName' field) is in the set of specified values.\n- NotIn: the requirement is satisfied if at least one container exit code\n (might be multiple if there are multiple containers not restricted\n by the 'containerName' field) is not in the set of specified values.\nAdditional values may be added in the future.
Clients should react to an unknown operator by assuming the requirement is not satisfied.", + "type": "string" + }, + "values": { + "description": "Specifies the set of values. Each returned container exit code (might be multiple in case of multiple containers) is checked against this set of values with respect to the operator. The list of values must be ordered and must not contain duplicates. Value '0' cannot be used for the In operator. At least one element is required. At most 255 elements are allowed.", + "items": { + "format": "int32", + "type": "integer" + }, + "type": "array", + "x-kubernetes-list-type": "set" + } + }, + "required": [ + "operator", + "values" + ], + "type": "object" + }, + "io.k8s.api.batch.v1.PodFailurePolicyOnPodConditionsPattern": { + "description": "PodFailurePolicyOnPodConditionsPattern describes a pattern for matching an actual pod condition type.", + "properties": { + "status": { + "description": "Specifies the required Pod condition status. To match a pod condition, it is required that the specified status equals the pod condition status. Defaults to True.", + "type": "string" + }, + "type": { + "description": "Specifies the required Pod condition type. To match a pod condition, it is required that the specified type equals the pod condition type.", + "type": "string" + } + }, + "required": [ + "type", + "status" + ], + "type": "object" + }, + "io.k8s.api.batch.v1.PodFailurePolicyRule": { + "description": "PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. One of onExitCodes and onPodConditions, but not both, can be used in each rule.", + "properties": { + "action": { + "description": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- FailIndex: indicates that the pod's index is marked as Failed and will\n not be restarted.\n This value is beta-level. It can be used when the\n `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values may be added in the future. Clients should react to an unknown action by skipping the rule.", + "type": "string" + }, + "onExitCodes": { + "$ref": "#/definitions/io.k8s.api.batch.v1.PodFailurePolicyOnExitCodesRequirement", + "description": "Represents the requirement on the container exit codes." + }, + "onPodConditions": { + "description": "Represents the requirement on the pod conditions. The requirement is represented as a list of pod condition patterns. The requirement is satisfied if at least one pattern matches an actual pod condition.
At most 20 elements are allowed.", + "items": { + "$ref": "#/definitions/io.k8s.api.batch.v1.PodFailurePolicyOnPodConditionsPattern" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "required": [ + "action" + ], + "type": "object" + }, + "io.k8s.api.batch.v1.UncountedTerminatedPods": { + "description": "UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't been accounted for in Job status counters.", + "properties": { + "failed": { + "description": "failed holds UIDs of failed Pods.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "set" + }, + "succeeded": { + "description": "succeeded holds UIDs of succeeded Pods.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "set" + } + }, + "type": "object" + }, + "io.k8s.api.certificates.v1.CertificateSigningRequest": { + "description": "CertificateSigningRequest objects provide a mechanism to obtain x509 certificates by submitting a certificate signing request, and having it asynchronously approved and issued.\n\nKubelets use this API to obtain:\n 1. client certificates to authenticate to kube-apiserver (with the \"kubernetes.io/kube-apiserver-client-kubelet\" signerName).\n 2. serving certificates for TLS endpoints kube-apiserver can connect to securely (with the \"kubernetes.io/kubelet-serving\" signerName).\n\nThis API can be used to request client certificates to authenticate to kube-apiserver (with the \"kubernetes.io/kube-apiserver-client\" signerName), or to obtain certificates from custom non-Kubernetes signers.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.certificates.v1.CertificateSigningRequestSpec", + "description": "spec contains the certificate request, and is immutable after creation. Only the request, signerName, expirationSeconds, and usages fields can be set on creation. Other fields are derived by Kubernetes and cannot be modified by users." + }, + "status": { + "$ref": "#/definitions/io.k8s.api.certificates.v1.CertificateSigningRequestStatus", + "description": "status contains information about whether the request is approved or denied, and the certificate issued by the signer, or the failure condition indicating signer failure."
+ } + }, + "required": [ + "spec" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "certificates.k8s.io", + "kind": "CertificateSigningRequest", + "version": "v1" + } + ] + }, + "io.k8s.api.certificates.v1.CertificateSigningRequestCondition": { + "description": "CertificateSigningRequestCondition describes a condition of a CertificateSigningRequest object", + "properties": { + "lastTransitionTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "lastTransitionTime is the time the condition last transitioned from one status to another. If unset, when a new condition type is added or an existing condition's status is changed, the server defaults this to the current time." + }, + "lastUpdateTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "lastUpdateTime is the time of the last update to this condition" + }, + "message": { + "description": "message contains a human readable message with details about the request state", + "type": "string" + }, + "reason": { + "description": "reason indicates a brief reason for the request state", + "type": "string" + }, + "status": { + "description": "status of the condition, one of True, False, Unknown. Approved, Denied, and Failed conditions may not be \"False\" or \"Unknown\".", + "type": "string" + }, + "type": { + "description": "type of the condition. Known conditions are \"Approved\", \"Denied\", and \"Failed\".\n\nAn \"Approved\" condition is added via the /approval subresource, indicating the request was approved and should be issued by the signer.\n\nA \"Denied\" condition is added via the /approval subresource, indicating the request was denied and should not be issued by the signer.\n\nA \"Failed\" condition is added via the /status subresource, indicating the signer failed to issue the certificate.\n\nApproved and Denied conditions are mutually exclusive. Approved, Denied, and Failed conditions cannot be removed once added.\n\nOnly one condition of a given type is allowed.", + "type": "string" + } + }, + "required": [ + "type", + "status" + ], + "type": "object" + }, + "io.k8s.api.certificates.v1.CertificateSigningRequestList": { + "description": "CertificateSigningRequestList is a collection of CertificateSigningRequest objects", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is a collection of CertificateSigningRequest objects", + "items": { + "$ref": "#/definitions/io.k8s.api.certificates.v1.CertificateSigningRequest" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "certificates.k8s.io", + "kind": "CertificateSigningRequestList", + "version": "v1" + } + ] + }, + "io.k8s.api.certificates.v1.CertificateSigningRequestSpec": { + "description": "CertificateSigningRequestSpec contains the certificate request.", + "properties": { + "expirationSeconds": { + "description": "expirationSeconds is the requested duration of validity of the issued certificate. The certificate signer may issue a certificate with a different validity duration so a client must check the delta between the notBefore and notAfter fields in the issued certificate to determine the actual duration.\n\nThe v1.22+ in-tree implementations of the well-known Kubernetes signers will honor this field as long as the requested duration is not greater than the maximum duration they will honor per the --cluster-signing-duration CLI flag to the Kubernetes controller manager.\n\nCertificate signers may not honor this field for various reasons:\n\n 1. Old signer that is unaware of the field (such as the in-tree\n implementations prior to v1.22)\n 2. Signer whose configured maximum is shorter than the requested duration\n 3. Signer whose configured minimum is longer than the requested duration\n\nThe minimum valid value for expirationSeconds is 600, i.e. 10 minutes.", + "format": "int32", + "type": "integer" + }, + "extra": { + "additionalProperties": { + "items": { + "type": "string" + }, + "type": "array" + }, + "description": "extra contains extra attributes of the user that created the CertificateSigningRequest. Populated by the API server on creation and immutable.", + "type": "object" + }, + "groups": { + "description": "groups contains group membership of the user that created the CertificateSigningRequest. Populated by the API server on creation and immutable.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "request": { + "description": "request contains an x509 certificate signing request encoded in a \"CERTIFICATE REQUEST\" PEM block. When serialized as JSON or YAML, the data is additionally base64-encoded.", + "format": "byte", + "type": "string", + "x-kubernetes-list-type": "atomic" + }, + "signerName": { + "description": "signerName indicates the requested signer, and is a qualified name.\n\nList/watch requests for CertificateSigningRequests can filter on this field using a \"spec.signerName=NAME\" fieldSelector.\n\nWell-known Kubernetes signers are:\n 1. \"kubernetes.io/kube-apiserver-client\": issues client certificates that can be used to authenticate to kube-apiserver.\n Requests for this signer are never auto-approved by kube-controller-manager, and can be issued by the \"csrsigning\" controller in kube-controller-manager.\n 2. \"kubernetes.io/kube-apiserver-client-kubelet\": issues client certificates that kubelets use to authenticate to kube-apiserver.\n Requests for this signer can be auto-approved by the \"csrapproving\" controller in kube-controller-manager, and can be issued by the \"csrsigning\" controller in kube-controller-manager.\n 3.
\"kubernetes.io/kubelet-serving\" issues serving certificates that kubelets use to serve TLS endpoints, which kube-apiserver can connect to securely.\n Requests for this signer are never auto-approved by kube-controller-manager, and can be issued by the \"csrsigning\" controller in kube-controller-manager.\n\nMore details are available at https://k8s.io/docs/reference/access-authn-authz/certificate-signing-requests/#kubernetes-signers\n\nCustom signerNames can also be specified. The signer defines:\n 1. Trust distribution: how trust (CA bundles) are distributed.\n 2. Permitted subjects: and behavior when a disallowed subject is requested.\n 3. Required, permitted, or forbidden x509 extensions in the request (including whether subjectAltNames are allowed, which types, restrictions on allowed values) and behavior when a disallowed extension is requested.\n 4. Required, permitted, or forbidden key usages / extended key usages.\n 5. Expiration/certificate lifetime: whether it is fixed by the signer, configurable by the admin.\n 6. Whether or not requests for CA certificates are allowed.", + "type": "string" + }, + "uid": { + "description": "uid contains the uid of the user that created the CertificateSigningRequest. Populated by the API server on creation and immutable.", + "type": "string" + }, + "usages": { + "description": "usages specifies a set of key usages requested in the issued certificate.\n\nRequests for TLS client certificates typically request: \"digital signature\", \"key encipherment\", \"client auth\".\n\nRequests for TLS serving certificates typically request: \"key encipherment\", \"digital signature\", \"server auth\".\n\nValid values are:\n \"signing\", \"digital signature\", \"content commitment\",\n \"key encipherment\", \"key agreement\", \"data encipherment\",\n \"cert sign\", \"crl sign\", \"encipher only\", \"decipher only\", \"any\",\n \"server auth\", \"client auth\",\n \"code signing\", \"email protection\", \"s/mime\",\n \"ipsec end system\", \"ipsec tunnel\", \"ipsec user\",\n \"timestamping\", \"ocsp signing\", \"microsoft sgc\", \"netscape sgc\"", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "username": { + "description": "username contains the name of the user that created the CertificateSigningRequest. Populated by the API server on creation and immutable.", + "type": "string" + } + }, + "required": [ + "request", + "signerName" + ], + "type": "object" + }, + "io.k8s.api.certificates.v1.CertificateSigningRequestStatus": { + "description": "CertificateSigningRequestStatus contains conditions used to indicate approved/denied/failed status of the request, and the issued certificate.", + "properties": { + "certificate": { + "description": "certificate is populated with an issued certificate by the signer after an Approved condition is present. This field is set via the /status subresource. Once populated, this field is immutable.\n\nIf the certificate signing request is denied, a condition of type \"Denied\" is added and this field remains empty. If the signer cannot issue the certificate, a condition of type \"Failed\" is added and this field remains empty.\n\nValidation requirements:\n 1. certificate must contain one or more PEM blocks.\n 2. All PEM blocks must have the \"CERTIFICATE\" label, contain no headers, and the encoded data\n must be a BER-encoded ASN.1 Certificate structure as described in section 4 of RFC5280.\n 3. 
Non-PEM content may appear before or after the \"CERTIFICATE\" PEM blocks and is unvalidated,\n to allow for explanatory text as described in section 5.2 of RFC7468.\n\nIf more than one PEM block is present, and the definition of the requested spec.signerName does not indicate otherwise, the first block is the issued certificate, and subsequent blocks should be treated as intermediate certificates and presented in TLS handshakes.\n\nThe certificate is encoded in PEM format.\n\nWhen serialized as JSON or YAML, the data is additionally base64-encoded, so it consists of:\n\n base64(\n -----BEGIN CERTIFICATE-----\n ...\n -----END CERTIFICATE-----\n )", + "format": "byte", + "type": "string", + "x-kubernetes-list-type": "atomic" + }, + "conditions": { + "description": "conditions applied to the request. Known conditions are \"Approved\", \"Denied\", and \"Failed\".", + "items": { + "$ref": "#/definitions/io.k8s.api.certificates.v1.CertificateSigningRequestCondition" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map" + } + }, + "type": "object" + }, + "io.k8s.api.certificates.v1alpha1.ClusterTrustBundle": { + "description": "ClusterTrustBundle is a cluster-scoped container for X.509 trust anchors (root certificates).\n\nClusterTrustBundle objects are considered to be readable by any authenticated user in the cluster, because they can be mounted by pods using the `clusterTrustBundle` projection. All service accounts have read access to ClusterTrustBundles by default. Users who only have namespace-level access to a cluster can read ClusterTrustBundles by impersonating a serviceaccount that they have access to.\n\nIt can be optionally associated with a particular signer, in which case it contains one valid set of trust anchors for that signer. Signers may have multiple associated ClusterTrustBundles; each is an independent set of trust anchors for that signer. Admission control is used to enforce that only users with permissions on the signer can create or modify the corresponding bundle.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "metadata contains the object metadata." + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.certificates.v1alpha1.ClusterTrustBundleSpec", + "description": "spec contains the signer (if any) and trust anchors."
+ } + }, + "required": [ + "spec" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "certificates.k8s.io", + "kind": "ClusterTrustBundle", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.certificates.v1alpha1.ClusterTrustBundleList": { + "description": "ClusterTrustBundleList is a collection of ClusterTrustBundle objects", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is a collection of ClusterTrustBundle objects", + "items": { + "$ref": "#/definitions/io.k8s.api.certificates.v1alpha1.ClusterTrustBundle" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "metadata contains the list metadata." + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "certificates.k8s.io", + "kind": "ClusterTrustBundleList", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.certificates.v1alpha1.ClusterTrustBundleSpec": { + "description": "ClusterTrustBundleSpec contains the signer and trust anchors.", + "properties": { + "signerName": { + "description": "signerName indicates the associated signer, if any.\n\nIn order to create or update a ClusterTrustBundle that sets signerName, you must have the following cluster-scoped permission: group=certificates.k8s.io resource=signers resourceName=\u003cthe signer name\u003e verb=attest.\n\nIf signerName is not empty, then the ClusterTrustBundle object must be named with the signer name as a prefix (translating slashes to colons). For example, for the signer name `example.com/foo`, valid ClusterTrustBundle object names include `example.com:foo:abc` and `example.com:foo:v1`.\n\nIf signerName is empty, then the ClusterTrustBundle object's name must not have such a prefix.\n\nList/watch requests for ClusterTrustBundles can filter on this field using a `spec.signerName=NAME` field selector.", + "type": "string" + }, + "trustBundle": { + "description": "trustBundle contains the individual X.509 trust anchors for this bundle, as PEM bundle of PEM-wrapped, DER-formatted X.509 certificates.\n\nThe data must consist only of PEM certificate blocks that parse as valid X.509 certificates. Each certificate must include a basic constraints extension with the CA bit set. 
The API server will reject objects that contain duplicate certificates, or that use PEM block headers.\n\nUsers of ClusterTrustBundles, including Kubelet, are free to reorder and deduplicate certificate blocks in this file according to their own logic, as well as to drop PEM block headers and inter-block data.", + "type": "string" + } + }, + "required": [ + "trustBundle" + ], + "type": "object" + }, + "io.k8s.api.coordination.v1.Lease": { + "description": "Lease defines a lease concept.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.coordination.v1.LeaseSpec", + "description": "spec contains the specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "coordination.k8s.io", + "kind": "Lease", + "version": "v1" + } + ] + }, + "io.k8s.api.coordination.v1.LeaseList": { + "description": "LeaseList is a list of Lease objects.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is a list of schema objects.", + "items": { + "$ref": "#/definitions/io.k8s.api.coordination.v1.Lease" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "coordination.k8s.io", + "kind": "LeaseList", + "version": "v1" + } + ] + }, + "io.k8s.api.coordination.v1.LeaseSpec": { + "description": "LeaseSpec is a specification of a Lease.", + "properties": { + "acquireTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime", + "description": "acquireTime is a time when the current lease was acquired." 
+ }, + "holderIdentity": { + "description": "holderIdentity contains the identity of the holder of a current lease.", + "type": "string" + }, + "leaseDurationSeconds": { + "description": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed renewTime.", + "format": "int32", + "type": "integer" + }, + "leaseTransitions": { + "description": "leaseTransitions is the number of transitions of a lease between holders.", + "format": "int32", + "type": "integer" + }, + "renewTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime", + "description": "renewTime is a time when the current holder of a lease has last updated the lease." + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource": { + "description": "Represents a Persistent Disk resource in AWS.\n\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling.", + "properties": { + "fsType": { + "description": "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "type": "string" + }, + "partition": { + "description": "partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).", + "format": "int32", + "type": "integer" + }, + "readOnly": { + "description": "readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "type": "boolean" + }, + "volumeID": { + "description": "volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "type": "string" + } + }, + "required": [ + "volumeID" + ], + "type": "object" + }, + "io.k8s.api.core.v1.Affinity": { + "description": "Affinity is a group of affinity scheduling rules.", + "properties": { + "nodeAffinity": { + "$ref": "#/definitions/io.k8s.api.core.v1.NodeAffinity", + "description": "Describes node affinity scheduling rules for the pod." + }, + "podAffinity": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodAffinity", + "description": "Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s))." + }, + "podAntiAffinity": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodAntiAffinity", + "description": "Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s))." 
+ } + }, + "type": "object" + }, + "io.k8s.api.core.v1.AttachedVolume": { + "description": "AttachedVolume describes a volume attached to a node", + "properties": { + "devicePath": { + "description": "DevicePath represents the device path where the volume should be available", + "type": "string" + }, + "name": { + "description": "Name of the attached volume", + "type": "string" + } + }, + "required": [ + "name", + "devicePath" + ], + "type": "object" + }, + "io.k8s.api.core.v1.AzureDiskVolumeSource": { + "description": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + "properties": { + "cachingMode": { + "description": "cachingMode is the Host Caching mode: None, Read Only, Read Write.", + "type": "string" + }, + "diskName": { + "description": "diskName is the Name of the data disk in the blob storage", + "type": "string" + }, + "diskURI": { + "description": "diskURI is the URI of data disk in the blob storage", + "type": "string" + }, + "fsType": { + "description": "fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "type": "string" + }, + "kind": { + "description": "kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared", + "type": "string" + }, + "readOnly": { + "description": "readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "type": "boolean" + } + }, + "required": [ + "diskName", + "diskURI" + ], + "type": "object" + }, + "io.k8s.api.core.v1.AzureFilePersistentVolumeSource": { + "description": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "properties": { + "readOnly": { + "description": "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "type": "boolean" + }, + "secretName": { + "description": "secretName is the name of secret that contains Azure Storage Account Name and Key", + "type": "string" + }, + "secretNamespace": { + "description": "secretNamespace is the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod", + "type": "string" + }, + "shareName": { + "description": "shareName is the azure Share Name", + "type": "string" + } + }, + "required": [ + "secretName", + "shareName" + ], + "type": "object" + }, + "io.k8s.api.core.v1.AzureFileVolumeSource": { + "description": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "properties": { + "readOnly": { + "description": "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "type": "boolean" + }, + "secretName": { + "description": "secretName is the name of secret that contains Azure Storage Account Name and Key", + "type": "string" + }, + "shareName": { + "description": "shareName is the azure share Name", + "type": "string" + } + }, + "required": [ + "secretName", + "shareName" + ], + "type": "object" + }, + "io.k8s.api.core.v1.Binding": { + "description": "Binding ties one object to another; for example, a pod is bound to a node by a scheduler. 
Deprecated in 1.7, please use the bindings subresource of pods instead.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "target": { + "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference", + "description": "The target object that you want to bind to the standard object." + } + }, + "required": [ + "target" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "Binding", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.CSIPersistentVolumeSource": { + "description": "Represents storage that is managed by an external CSI volume driver (Beta feature)", + "properties": { + "controllerExpandSecretRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretReference", + "description": "controllerExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerExpandVolume call. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed." + }, + "controllerPublishSecretRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretReference", + "description": "controllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed." + }, + "driver": { + "description": "driver is the name of the driver to use for this volume. Required.", + "type": "string" + }, + "fsType": { + "description": "fsType to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\".", + "type": "string" + }, + "nodeExpandSecretRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretReference", + "description": "nodeExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeExpandVolume call. This field is optional, may be omitted if no secret is required. If the secret object contains more than one secret, all secrets are passed." + }, + "nodePublishSecretRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretReference", + "description": "nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. 
If the secret object contains more than one secret, all secrets are passed." + }, + "nodeStageSecretRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretReference", + "description": "nodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed." + }, + "readOnly": { + "description": "readOnly value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write).", + "type": "boolean" + }, + "volumeAttributes": { + "additionalProperties": { + "type": "string" + }, + "description": "volumeAttributes of the volume to publish.", + "type": "object" + }, + "volumeHandle": { + "description": "volumeHandle is the unique volume name returned by the CSI volume plugin's CreateVolume to refer to the volume on all subsequent calls. Required.", + "type": "string" + } + }, + "required": [ + "driver", + "volumeHandle" + ], + "type": "object" + }, + "io.k8s.api.core.v1.CSIVolumeSource": { + "description": "Represents a source location of a volume to mount, managed by an external CSI driver", + "properties": { + "driver": { + "description": "driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster.", + "type": "string" + }, + "fsType": { + "description": "fsType to mount. Ex. \"ext4\", \"xfs\", \"ntfs\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.", + "type": "string" + }, + "nodePublishSecretRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference", + "description": "nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed." + }, + "readOnly": { + "description": "readOnly specifies a read-only configuration for the volume. Defaults to false (read/write).", + "type": "boolean" + }, + "volumeAttributes": { + "additionalProperties": { + "type": "string" + }, + "description": "volumeAttributes stores driver-specific properties that are passed to the CSI driver.
Consult your driver's documentation for supported values.", + "type": "object" + } + }, + "required": [ + "driver" + ], + "type": "object" + }, + "io.k8s.api.core.v1.Capabilities": { + "description": "Adds and removes POSIX capabilities from running containers.", + "properties": { + "add": { + "description": "Added capabilities", + "items": { + "type": "string" + }, + "type": "array" + }, + "drop": { + "description": "Removed capabilities", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.CephFSPersistentVolumeSource": { + "description": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.", + "properties": { + "monitors": { + "description": "monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "items": { + "type": "string" + }, + "type": "array" + }, + "path": { + "description": "path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /", + "type": "string" + }, + "readOnly": { + "description": "readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "type": "boolean" + }, + "secretFile": { + "description": "secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "type": "string" + }, + "secretRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretReference", + "description": "secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" + }, + "user": { + "description": "user is Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "type": "string" + } + }, + "required": [ + "monitors" + ], + "type": "object" + }, + "io.k8s.api.core.v1.CephFSVolumeSource": { + "description": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.", + "properties": { + "monitors": { + "description": "monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "items": { + "type": "string" + }, + "type": "array" + }, + "path": { + "description": "path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /", + "type": "string" + }, + "readOnly": { + "description": "readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "type": "boolean" + }, + "secretFile": { + "description": "secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "type": "string" + }, + "secretRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference", + "description": "secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it" + }, + "user": { + "description": "user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "type": "string" + } + }, + "required": [ + "monitors" + ], + "type": "object" + }, + "io.k8s.api.core.v1.CinderPersistentVolumeSource": { + "description": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", + "properties": { + "fsType": { + "description": "fsType Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "type": "string" + }, + "readOnly": { + "description": "readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "type": "boolean" + }, + "secretRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretReference", + "description": "secretRef is Optional: points to a secret object containing parameters used to connect to OpenStack." + }, + "volumeID": { + "description": "volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "type": "string" + } + }, + "required": [ + "volumeID" + ], + "type": "object" + }, + "io.k8s.api.core.v1.CinderVolumeSource": { + "description": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", + "properties": { + "fsType": { + "description": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "type": "string" + }, + "readOnly": { + "description": "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "type": "boolean" + }, + "secretRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference", + "description": "secretRef is optional: points to a secret object containing parameters used to connect to OpenStack." + }, + "volumeID": { + "description": "volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "type": "string" + } + }, + "required": [ + "volumeID" + ], + "type": "object" + }, + "io.k8s.api.core.v1.ClaimSource": { + "description": "ClaimSource describes a reference to a ResourceClaim.\n\nExactly one of these fields should be set. 
Consumers of this type must treat an empty object as if it has an unknown value.", + "properties": { + "resourceClaimName": { + "description": "ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod.", + "type": "string" + }, + "resourceClaimTemplateName": { + "description": "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod.\n\nThe template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.\n\nThis field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.ClientIPConfig": { + "description": "ClientIPConfig represents the configurations of Client IP based session affinity.", + "properties": { + "timeoutSeconds": { + "description": "timeoutSeconds specifies the seconds of ClientIP type session sticky time. The value must be \u003e0 \u0026\u0026 \u003c=86400(for 1 day) if ServiceAffinity == \"ClientIP\". Default value is 10800(for 3 hours).", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.ClusterTrustBundleProjection": { + "description": "ClusterTrustBundleProjection describes how to select a set of ClusterTrustBundle objects and project their contents into the pod filesystem.", + "properties": { + "labelSelector": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", + "description": "Select all ClusterTrustBundles that match this label selector. Only has effect if signerName is set. Mutually-exclusive with name. If unset, interpreted as \"match nothing\". If set but empty, interpreted as \"match everything\"." + }, + "name": { + "description": "Select a single ClusterTrustBundle by object name. Mutually-exclusive with signerName and labelSelector.", + "type": "string" + }, + "optional": { + "description": "If true, don't block pod startup if the referenced ClusterTrustBundle(s) aren't available. If using name, then the named ClusterTrustBundle is allowed not to exist. If using signerName, then the combination of signerName and labelSelector is allowed to match zero ClusterTrustBundles.", + "type": "boolean" + }, + "path": { + "description": "Relative path from the volume root to write the bundle.", + "type": "string" + }, + "signerName": { + "description": "Select all ClusterTrustBundles that match this signer name. Mutually-exclusive with name. The contents of all selected ClusterTrustBundles will be unified and deduplicated.", + "type": "string" + } + }, + "required": [ + "path" + ], + "type": "object" + }, + "io.k8s.api.core.v1.ComponentCondition": { + "description": "Information about the condition of a component.", + "properties": { + "error": { + "description": "Condition error code for a component. For example, a health check error code.", + "type": "string" + }, + "message": { + "description": "Message about the condition for a component. For example, information about a health check.", + "type": "string" + }, + "status": { + "description": "Status of the condition for a component. 
Valid values for \"Healthy\": \"True\", \"False\", or \"Unknown\".", + "type": "string" + }, + "type": { + "description": "Type of condition for a component. Valid value: \"Healthy\"", + "type": "string" + } + }, + "required": [ + "type", + "status" + ], + "type": "object" + }, + "io.k8s.api.core.v1.ComponentStatus": { + "description": "ComponentStatus (and ComponentStatusList) holds the cluster validation info. Deprecated: This API is deprecated in v1.19+", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "conditions": { + "description": "List of component conditions observed", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ComponentCondition" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "ComponentStatus", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.ComponentStatusList": { + "description": "Status of all the conditions for the component as a list of ComponentStatus objects. Deprecated: This API is deprecated in v1.19+", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of ComponentStatus objects.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ComponentStatus" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "ComponentStatusList", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.ConfigMap": { + "description": "ConfigMap holds configuration data for pods to consume.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "binaryData": { + "additionalProperties": { + "format": "byte", + "type": "string" + }, + "description": "BinaryData contains the binary data. Each key must consist of alphanumeric characters, '-', '_' or '.'. BinaryData can contain byte sequences that are not in the UTF-8 range. The keys stored in BinaryData must not overlap with the ones in the Data field, this is enforced during validation process. Using this field will require 1.10+ apiserver and kubelet.", + "type": "object" + }, + "data": { + "additionalProperties": { + "type": "string" + }, + "description": "Data contains the configuration data. Each key must consist of alphanumeric characters, '-', '_' or '.'. Values with non-UTF-8 byte sequences must use the BinaryData field. The keys stored in Data must not overlap with the keys in the BinaryData field, this is enforced during validation process.", + "type": "object" + }, + "immutable": { + "description": "Immutable, if set to true, ensures that data stored in the ConfigMap cannot be updated (only object metadata can be modified). If not set to true, the field can be modified at any time. Defaulted to nil.", + "type": "boolean" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "ConfigMap", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.ConfigMapEnvSource": { + "description": "ConfigMapEnvSource selects a ConfigMap to populate the environment variables with.\n\nThe contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.", + "properties": { + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": "string" + }, + "optional": { + "description": "Specify whether the ConfigMap must be defined", + "type": "boolean" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.ConfigMapKeySelector": { + "description": "Selects a key from a ConfigMap.", + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": "string" + }, + "optional": { + "description": "Specify whether the ConfigMap or its key must be defined", + "type": "boolean" + } + }, + "required": [ + "key" + ], + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.ConfigMapList": { + "description": "ConfigMapList is a resource containing a list of ConfigMap objects.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is the list of ConfigMaps.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ConfigMap" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "ConfigMapList", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.ConfigMapNodeConfigSource": { + "description": "ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration", + "properties": { + "kubeletConfigKey": { + "description": "KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure. This field is required in all cases.", + "type": "string" + }, + "name": { + "description": "Name is the metadata.name of the referenced ConfigMap. This field is required in all cases.", + "type": "string" + }, + "namespace": { + "description": "Namespace is the metadata.namespace of the referenced ConfigMap. This field is required in all cases.", + "type": "string" + }, + "resourceVersion": { + "description": "ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status.", + "type": "string" + }, + "uid": { + "description": "UID is the metadata.UID of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status.", + "type": "string" + } + }, + "required": [ + "namespace", + "name", + "kubeletConfigKey" + ], + "type": "object" + }, + "io.k8s.api.core.v1.ConfigMapProjection": { + "description": "Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths.
Note that this is identical to a configmap volume source without the default mode.", + "properties": { + "items": { + "description": "items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.KeyToPath" + }, + "type": "array" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": "string" + }, + "optional": { + "description": "optional specify whether the ConfigMap or its keys must be defined", + "type": "boolean" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.ConfigMapVolumeSource": { + "description": "Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.", + "properties": { + "defaultMode": { + "description": "defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "format": "int32", + "type": "integer" + }, + "items": { + "description": "items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.KeyToPath" + }, + "type": "array" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": "string" + }, + "optional": { + "description": "optional specify whether the ConfigMap or its keys must be defined", + "type": "boolean" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.Container": { + "description": "A single application container that you want to run within a pod.", + "properties": { + "args": { + "description": "Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". 
Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "items": { + "type": "string" + }, + "type": "array" + }, + "command": { + "description": "Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "items": { + "type": "string" + }, + "type": "array" + }, + "env": { + "description": "List of environment variables to set in the container. Cannot be updated.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.EnvVar" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "envFrom": { + "description": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.EnvFromSource" + }, + "type": "array" + }, + "image": { + "description": "Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + "type": "string" + }, + "imagePullPolicy": { + "description": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + "type": "string" + }, + "lifecycle": { + "$ref": "#/definitions/io.k8s.api.core.v1.Lifecycle", + "description": "Actions that the management system should take in response to container lifecycle events. Cannot be updated." + }, + "livenessProbe": { + "$ref": "#/definitions/io.k8s.api.core.v1.Probe", + "description": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + }, + "name": { + "description": "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", + "type": "string" + }, + "ports": { + "description": "List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Modifying this array with strategic merge patch may corrupt the data. 
For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerPort" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "containerPort", + "protocol" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "containerPort", + "x-kubernetes-patch-strategy": "merge" + }, + "readinessProbe": { + "$ref": "#/definitions/io.k8s.api.core.v1.Probe", + "description": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + }, + "resizePolicy": { + "description": "Resources resize policy for the container.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerResizePolicy" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "resources": { + "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements", + "description": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/" + }, + "restartPolicy": { + "description": "RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is \"Always\". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as \"Always\" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy \"Always\" will be shut down. This lifecycle differs from normal init containers and is often referred to as a \"sidecar\" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.", + "type": "string" + }, + "securityContext": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecurityContext", + "description": "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/" + }, + "startupProbe": { + "$ref": "#/definitions/io.k8s.api.core.v1.Probe", + "description": "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes" + }, + "stdin": { + "description": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. 
Default is false.", + "type": "boolean" + }, + "stdinOnce": { + "description": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", + "type": "boolean" + }, + "terminationMessagePath": { + "description": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", + "type": "string" + }, + "terminationMessagePolicy": { + "description": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", + "type": "string" + }, + "tty": { + "description": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", + "type": "boolean" + }, + "volumeDevices": { + "description": "volumeDevices is the list of block devices to be used by the container.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.VolumeDevice" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "devicePath", + "x-kubernetes-patch-strategy": "merge" + }, + "volumeMounts": { + "description": "Pod volumes to mount into the container's filesystem. Cannot be updated.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.VolumeMount" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "mountPath", + "x-kubernetes-patch-strategy": "merge" + }, + "workingDir": { + "description": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "io.k8s.api.core.v1.ContainerImage": { + "description": "Describe a container image", + "properties": { + "names": { + "description": "Names by which this image is known. e.g. [\"kubernetes.example/hyperkube:v1.0.7\", \"cloud-vendor.registry.example/cloud-vendor/hyperkube:v1.0.7\"]", + "items": { + "type": "string" + }, + "type": "array" + }, + "sizeBytes": { + "description": "The size of the image in bytes.", + "format": "int64", + "type": "integer" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.ContainerPort": { + "description": "ContainerPort represents a network port in a single container.", + "properties": { + "containerPort": { + "description": "Number of port to expose on the pod's IP address. 
This must be a valid port number, 0 \u003c x \u003c 65536.", + "format": "int32", + "type": "integer" + }, + "hostIP": { + "description": "What host IP to bind the external port to.", + "type": "string" + }, + "hostPort": { + "description": "Number of port to expose on the host. If specified, this must be a valid port number, 0 \u003c x \u003c 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.", + "format": "int32", + "type": "integer" + }, + "name": { + "description": "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.", + "type": "string" + }, + "protocol": { + "description": "Protocol for port. Must be UDP, TCP, or SCTP. Defaults to \"TCP\".", + "type": "string" + } + }, + "required": [ + "containerPort" + ], + "type": "object" + }, + "io.k8s.api.core.v1.ContainerResizePolicy": { + "description": "ContainerResizePolicy represents resource resize policy for the container.", + "properties": { + "resourceName": { + "description": "Name of the resource to which this resource resize policy applies. Supported values: cpu, memory.", + "type": "string" + }, + "restartPolicy": { + "description": "Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired.", + "type": "string" + } + }, + "required": [ + "resourceName", + "restartPolicy" + ], + "type": "object" + }, + "io.k8s.api.core.v1.ContainerState": { + "description": "ContainerState holds a possible state of container. Only one of its members may be specified. If none of them is specified, the default one is ContainerStateWaiting.", + "properties": { + "running": { + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerStateRunning", + "description": "Details about a running container" + }, + "terminated": { + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerStateTerminated", + "description": "Details about a terminated container" + }, + "waiting": { + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerStateWaiting", + "description": "Details about a waiting container" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.ContainerStateRunning": { + "description": "ContainerStateRunning is a running state of a container.", + "properties": { + "startedAt": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "Time at which the container was last (re-)started" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.ContainerStateTerminated": { + "description": "ContainerStateTerminated is a terminated state of a container.", + "properties": { + "containerID": { + "description": "Container's ID in the format '\u003ctype\u003e://\u003ccontainer_id\u003e'", + "type": "string" + }, + "exitCode": { + "description": "Exit status from the last termination of the container", + "format": "int32", + "type": "integer" + }, + "finishedAt": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "Time at which the container last terminated" + }, + "message": { + "description": "Message regarding the last termination of the container", + "type": "string" + }, + "reason": { + "description": "(brief) reason from the last termination of the container", + "type": "string" + }, + "signal": { + "description": "Signal from the last termination of the container", + "format": "int32", + "type": "integer" + }, + "startedAt": { + "$ref": 
"#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "Time at which previous execution of the container started" + } + }, + "required": [ + "exitCode" + ], + "type": "object" + }, + "io.k8s.api.core.v1.ContainerStateWaiting": { + "description": "ContainerStateWaiting is a waiting state of a container.", + "properties": { + "message": { + "description": "Message regarding why the container is not yet running.", + "type": "string" + }, + "reason": { + "description": "(brief) reason the container is not yet running.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.ContainerStatus": { + "description": "ContainerStatus contains details for the current status of this container.", + "properties": { + "allocatedResources": { + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + }, + "description": "AllocatedResources represents the compute resources allocated for this container by the node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission and after successfully admitting desired pod resize.", + "type": "object" + }, + "containerID": { + "description": "ContainerID is the ID of the container in the format '\u003ctype\u003e://\u003ccontainer_id\u003e'. Where type is a container runtime identifier, returned from Version call of CRI API (for example \"containerd\").", + "type": "string" + }, + "image": { + "description": "Image is the name of container image that the container is running. The container image may not match the image used in the PodSpec, as it may have been resolved by the runtime. More info: https://kubernetes.io/docs/concepts/containers/images.", + "type": "string" + }, + "imageID": { + "description": "ImageID is the image ID of the container's image. The image ID may not match the image ID of the image used in the PodSpec, as it may have been resolved by the runtime.", + "type": "string" + }, + "lastState": { + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerState", + "description": "LastTerminationState holds the last termination state of the container to help debug container crashes and restarts. This field is not populated if the container is still running and RestartCount is 0." + }, + "name": { + "description": "Name is a DNS_LABEL representing the unique name of the container. Each container in a pod must have a unique name across all container types. Cannot be updated.", + "type": "string" + }, + "ready": { + "description": "Ready specifies whether the container is currently passing its readiness check. The value will change as readiness probes keep executing. If no readiness probes are specified, this field defaults to true once the container is fully started (see Started field).\n\nThe value is typically used to determine whether a container is ready to accept traffic.", + "type": "boolean" + }, + "resources": { + "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements", + "description": "Resources represents the compute resource requests and limits that have been successfully enacted on the running container after it has been started or has been successfully resized." + }, + "restartCount": { + "description": "RestartCount holds the number of times the container has been restarted. Kubelet makes an effort to always increment the value, but there are cases when the state may be lost due to node restarts and then the value may be reset to 0. 
The value is never negative.", + "format": "int32", + "type": "integer" + }, + "started": { + "description": "Started indicates whether the container has finished its postStart lifecycle hook and passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. In both cases, startup probes will run again. Is always true when no startupProbe is defined and container is running and has passed the postStart lifecycle hook. The null value must be treated the same as false.", + "type": "boolean" + }, + "state": { + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerState", + "description": "State holds details about the container's current condition." + } + }, + "required": [ + "name", + "ready", + "restartCount", + "image", + "imageID" + ], + "type": "object" + }, + "io.k8s.api.core.v1.DaemonEndpoint": { + "description": "DaemonEndpoint contains information about a single Daemon endpoint.", + "properties": { + "Port": { + "description": "Port number of the given endpoint.", + "format": "int32", + "type": "integer" + } + }, + "required": [ + "Port" + ], + "type": "object" + }, + "io.k8s.api.core.v1.DownwardAPIProjection": { + "description": "Represents downward API info for projecting into a projected volume. Note that this is identical to a downwardAPI volume source without the default mode.", + "properties": { + "items": { + "description": "Items is a list of DownwardAPIVolume file", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.DownwardAPIVolumeFile" + }, + "type": "array" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.DownwardAPIVolumeFile": { + "description": "DownwardAPIVolumeFile represents information to create the file containing the pod field", + "properties": { + "fieldRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.ObjectFieldSelector", + "description": "Required: Selects a field of the pod: only annotations, labels, name and namespace are supported." + }, + "mode": { + "description": "Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "format": "int32", + "type": "integer" + }, + "path": { + "description": "Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'", + "type": "string" + }, + "resourceFieldRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.ResourceFieldSelector", + "description": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported." + } + }, + "required": [ + "path" + ], + "type": "object" + }, + "io.k8s.api.core.v1.DownwardAPIVolumeSource": { + "description": "DownwardAPIVolumeSource represents a volume containing downward API info. Downward API volumes support ownership management and SELinux relabeling.", + "properties": { + "defaultMode": { + "description": "Optional: mode bits to use on created files by default. Must be a Optional: mode bits used to set permissions on created files by default. 
Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "format": "int32", + "type": "integer" + }, + "items": { + "description": "Items is a list of downward API volume file", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.DownwardAPIVolumeFile" + }, + "type": "array" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.EmptyDirVolumeSource": { + "description": "Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling.", + "properties": { + "medium": { + "description": "medium represents what type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + "type": "string" + }, + "sizeLimit": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity", + "description": "sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.EndpointAddress": { + "description": "EndpointAddress is a tuple that describes a single IP address.", + "properties": { + "hostname": { + "description": "The Hostname of this endpoint", + "type": "string" + }, + "ip": { + "description": "The IP of this endpoint. May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), or link-local multicast (224.0.0.0/24 or ff02::/16).", + "type": "string" + }, + "nodeName": { + "description": "Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.", + "type": "string" + }, + "targetRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference", + "description": "Reference to object providing the endpoint." + } + }, + "required": [ + "ip" + ], + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.EndpointPort": { + "description": "EndpointPort is a tuple that describes a single port.", + "properties": { + "appProtocol": { + "description": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. 
Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", + "type": "string" + }, + "name": { + "description": "The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.", + "type": "string" + }, + "port": { + "description": "The port number of the endpoint.", + "format": "int32", + "type": "integer" + }, + "protocol": { + "description": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.EndpointSubset": { + "description": "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\n\n\t{\n\t Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t}\n\nThe resulting set of endpoints can be viewed as:\n\n\ta: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n\tb: [ 10.10.1.1:309, 10.10.2.2:309 ]", + "properties": { + "addresses": { + "description": "IP addresses which offer the related ports that are marked as ready. These endpoints should be considered safe for load balancers and clients to utilize.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.EndpointAddress" + }, + "type": "array" + }, + "notReadyAddresses": { + "description": "IP addresses which offer the related ports but are not currently marked as ready because they have not yet finished starting, have recently failed a readiness check, or have recently failed a liveness check.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.EndpointAddress" + }, + "type": "array" + }, + "ports": { + "description": "Port numbers available on the related IP addresses.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.EndpointPort" + }, + "type": "array" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.Endpoints": { + "description": "Endpoints is a collection of endpoints that implement the actual service. Example:\n\n\t Name: \"mysvc\",\n\t Subsets: [\n\t {\n\t Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t },\n\t {\n\t Addresses: [{\"ip\": \"10.10.3.3\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n\t },\n\t]", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "subsets": { + "description": "The set of all endpoints is the union of all subsets. Addresses are placed into subsets according to the IPs they share. A single address with multiple ports, some of which are ready and some of which are not (because they come from different containers) will result in the address being displayed in different subsets for the different ports. No address will appear in both Addresses and NotReadyAddresses in the same subset. Sets of addresses and ports that comprise a service.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.EndpointSubset" + }, + "type": "array" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "Endpoints", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.EndpointsList": { + "description": "EndpointsList is a list of endpoints.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of endpoints.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Endpoints" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "EndpointsList", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.EnvFromSource": { + "description": "EnvFromSource represents the source of a set of ConfigMaps", + "properties": { + "configMapRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.ConfigMapEnvSource", + "description": "The ConfigMap to select from" + }, + "prefix": { + "description": "An optional identifier to prepend to each key in the ConfigMap. 
Must be a C_IDENTIFIER.", + "type": "string" + }, + "secretRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretEnvSource", + "description": "The Secret to select from" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.EnvVar": { + "description": "EnvVar represents an environment variable present in a Container.", + "properties": { + "name": { + "description": "Name of the environment variable. Must be a C_IDENTIFIER.", + "type": "string" + }, + "value": { + "description": "Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".", + "type": "string" + }, + "valueFrom": { + "$ref": "#/definitions/io.k8s.api.core.v1.EnvVarSource", + "description": "Source for the environment variable's value. Cannot be used if value is not empty." + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "io.k8s.api.core.v1.EnvVarSource": { + "description": "EnvVarSource represents a source for the value of an EnvVar.", + "properties": { + "configMapKeyRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.ConfigMapKeySelector", + "description": "Selects a key of a ConfigMap." + }, + "fieldRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.ObjectFieldSelector", + "description": "Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['\u003cKEY\u003e']`, `metadata.annotations['\u003cKEY\u003e']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs." + }, + "resourceFieldRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.ResourceFieldSelector", + "description": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported." + }, + "secretKeyRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector", + "description": "Selects a key of a secret in the pod's namespace" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.EphemeralContainer": { + "description": "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation.\n\nTo add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted.", + "properties": { + "args": { + "description": "Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. 
Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "items": { + "type": "string" + }, + "type": "array" + }, + "command": { + "description": "Entrypoint array. Not executed within a shell. The image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "items": { + "type": "string" + }, + "type": "array" + }, + "env": { + "description": "List of environment variables to set in the container. Cannot be updated.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.EnvVar" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "envFrom": { + "description": "List of sources to populate environment variables in the container. Each key defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.EnvFromSource" + }, + "type": "array" + }, + "image": { + "description": "Container image name. More info: https://kubernetes.io/docs/concepts/containers/images", + "type": "string" + }, + "imagePullPolicy": { + "description": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + "type": "string" + }, + "lifecycle": { + "$ref": "#/definitions/io.k8s.api.core.v1.Lifecycle", + "description": "Lifecycle is not allowed for ephemeral containers." + }, + "livenessProbe": { + "$ref": "#/definitions/io.k8s.api.core.v1.Probe", + "description": "Probes are not allowed for ephemeral containers." + }, + "name": { + "description": "Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers.", + "type": "string" + }, + "ports": { + "description": "Ports are not allowed for ephemeral containers.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerPort" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "containerPort", + "protocol" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "containerPort", + "x-kubernetes-patch-strategy": "merge" + }, + "readinessProbe": { + "$ref": "#/definitions/io.k8s.api.core.v1.Probe", + "description": "Probes are not allowed for ephemeral containers." 
+ }, + "resizePolicy": { + "description": "Resources resize policy for the container.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerResizePolicy" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "resources": { + "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements", + "description": "Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod." + }, + "restartPolicy": { + "description": "Restart policy for the container to manage the restart behavior of each container within a pod. This may only be set for init containers. You cannot set this field on ephemeral containers.", + "type": "string" + }, + "securityContext": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecurityContext", + "description": "Optional: SecurityContext defines the security options the ephemeral container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext." + }, + "startupProbe": { + "$ref": "#/definitions/io.k8s.api.core.v1.Probe", + "description": "Probes are not allowed for ephemeral containers." + }, + "stdin": { + "description": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", + "type": "boolean" + }, + "stdinOnce": { + "description": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", + "type": "boolean" + }, + "targetContainerName": { + "description": "If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container uses the namespaces configured in the Pod spec.\n\nThe container runtime must implement support for this feature. If the runtime does not support namespace targeting then the result of setting this field is undefined.", + "type": "string" + }, + "terminationMessagePath": { + "description": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", + "type": "string" + }, + "terminationMessagePolicy": { + "description": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. 
Cannot be updated.", + "type": "string" + }, + "tty": { + "description": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", + "type": "boolean" + }, + "volumeDevices": { + "description": "volumeDevices is the list of block devices to be used by the container.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.VolumeDevice" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "devicePath", + "x-kubernetes-patch-strategy": "merge" + }, + "volumeMounts": { + "description": "Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.VolumeMount" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "mountPath", + "x-kubernetes-patch-strategy": "merge" + }, + "workingDir": { + "description": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "io.k8s.api.core.v1.EphemeralVolumeSource": { + "description": "Represents an ephemeral volume that is handled by a normal storage driver.", + "properties": { + "volumeClaimTemplate": { + "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimTemplate", + "description": "Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `\u003cpod name\u003e-\u003cvolume name\u003e` where `\u003cvolume name\u003e` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long).\n\nAn existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster.\n\nThis field is read-only and no changes will be made by Kubernetes to the PVC after it has been created.\n\nRequired, must not be nil." + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.Event": { + "description": "Event is a report of an event somewhere in the cluster. Events have a limited retention time and triggers and messages may evolve with time. Event consumers should not rely on the timing of an event with a given Reason reflecting a consistent underlying trigger, or the continued existence of events with that Reason. Events should be treated as informative, best-effort, supplemental data.", + "properties": { + "action": { + "description": "What action was taken/failed regarding to the Regarding object.", + "type": "string" + }, + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "count": { + "description": "The number of times this event has occurred.", + "format": "int32", + "type": "integer" + }, + "eventTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime", + "description": "Time when this Event was first observed." + }, + "firstTimestamp": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)" + }, + "involvedObject": { + "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference", + "description": "The object that this event is about." + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "lastTimestamp": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "The time at which the most recent occurrence of this event was recorded." + }, + "message": { + "description": "A human-readable description of the status of this operation.", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "reason": { + "description": "This should be a short, machine understandable string that gives the reason for the transition into the object's current status.", + "type": "string" + }, + "related": { + "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference", + "description": "Optional secondary object for more complex actions." + }, + "reportingComponent": { + "description": "Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.", + "type": "string" + }, + "reportingInstance": { + "description": "ID of the controller instance, e.g. `kubelet-xyzf`.", + "type": "string" + }, + "series": { + "$ref": "#/definitions/io.k8s.api.core.v1.EventSeries", + "description": "Data about the Event series this event represents or nil if it's a singleton Event." + }, + "source": { + "$ref": "#/definitions/io.k8s.api.core.v1.EventSource", + "description": "The component reporting this event. Should be a short machine understandable string." + }, + "type": { + "description": "Type of this event (Normal, Warning), new types could be added in the future", + "type": "string" + } + }, + "required": [ + "metadata", + "involvedObject" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "Event", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.EventList": { + "description": "EventList is a list of events.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of events", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Event" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "EventList", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.EventSeries": { + "description": "EventSeries contains information on a series of events, i.e. a thing that was/is happening continuously for some time.", + "properties": { + "count": { + "description": "Number of occurrences in this series up to the last heartbeat time", + "format": "int32", + "type": "integer" + }, + "lastObservedTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime", + "description": "Time of the last occurrence observed" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.EventSource": { + "description": "EventSource contains information for an event.", + "properties": { + "component": { + "description": "Component from which the event is generated.", + "type": "string" + }, + "host": { + "description": "Node name on which the event is generated.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.ExecAction": { + "description": "ExecAction describes a \"run in container\" action.", + "properties": { + "command": { + "description": "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.FCVolumeSource": { + "description": "Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling.", + "properties": { + "fsType": { + "description": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "type": "string" + }, + "lun": { + "description": "lun is Optional: FC target lun number", + "format": "int32", + "type": "integer" + }, + "readOnly": { + "description": "readOnly is Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "type": "boolean" + }, + "targetWWNs": { + "description": "targetWWNs is Optional: FC target worldwide names (WWNs)", + "items": { + "type": "string" + }, + "type": "array" + }, + "wwids": { + "description": "wwids is Optional: FC volume world wide identifiers (wwids). Either wwids or a combination of targetWWNs and lun must be set, but not both simultaneously.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.FlexPersistentVolumeSource": { + "description": "FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin.", + "properties": { + "driver": { + "description": "driver is the name of the driver to use for this volume.", + "type": "string" + }, + "fsType": { + "description": "fsType is the Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.", + "type": "string" + }, + "options": { + "additionalProperties": { + "type": "string" + }, + "description": "options is Optional: this field holds extra command options if any.", + "type": "object" + }, + "readOnly": { + "description": "readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "type": "boolean" + }, + "secretRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretReference", + "description": "secretRef is Optional: SecretRef is a reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts." + } + }, + "required": [ + "driver" + ], + "type": "object" + }, + "io.k8s.api.core.v1.FlexVolumeSource": { + "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + "properties": { + "driver": { + "description": "driver is the name of the driver to use for this volume.", + "type": "string" + }, + "fsType": { + "description": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.", + "type": "string" + }, + "options": { + "additionalProperties": { + "type": "string" + }, + "description": "options is Optional: this field holds extra command options if any.", + "type": "object" + }, + "readOnly": { + "description": "readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "type": "boolean" + }, + "secretRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference", + "description": "secretRef is Optional: secretRef is a reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts." + } + }, + "required": [ + "driver" + ], + "type": "object" + }, + "io.k8s.api.core.v1.FlockerVolumeSource": { + "description": "Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. 
Flocker volumes do not support ownership management or SELinux relabeling.", + "properties": { + "datasetName": { + "description": "datasetName is the name of the dataset, stored as metadata -\u003e name on the dataset for Flocker; it should be considered deprecated", + "type": "string" + }, + "datasetUUID": { + "description": "datasetUUID is the UUID of the dataset. This is the unique identifier of a Flocker dataset", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.GCEPersistentDiskVolumeSource": { + "description": "Represents a Persistent Disk resource in Google Compute Engine.\n\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling.", + "properties": { + "fsType": { + "description": "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "type": "string" + }, + "partition": { + "description": "partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "format": "int32", + "type": "integer" + }, + "pdName": { + "description": "pdName is the unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "type": "string" + }, + "readOnly": { + "description": "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "type": "boolean" + } + }, + "required": [ + "pdName" + ], + "type": "object" + }, + "io.k8s.api.core.v1.GRPCAction": { + "properties": { + "port": { + "description": "Port number of the gRPC service. Number must be in the range 1 to 65535.", + "format": "int32", + "type": "integer" + }, + "service": { + "description": "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\nIf this is not specified, the default behavior is defined by gRPC.", + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "io.k8s.api.core.v1.GitRepoVolumeSource": { + "description": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", + "properties": { + "directory": { + "description": "directory is the target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. 
Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.", + "type": "string" + }, + "repository": { + "description": "repository is the URL", + "type": "string" + }, + "revision": { + "description": "revision is the commit hash for the specified revision.", + "type": "string" + } + }, + "required": [ + "repository" + ], + "type": "object" + }, + "io.k8s.api.core.v1.GlusterfsPersistentVolumeSource": { + "description": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", + "properties": { + "endpoints": { + "description": "endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "type": "string" + }, + "endpointsNamespace": { + "description": "endpointsNamespace is the namespace that contains the Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "type": "string" + }, + "path": { + "description": "path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "type": "string" + }, + "readOnly": { + "description": "readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "type": "boolean" + } + }, + "required": [ + "endpoints", + "path" + ], + "type": "object" + }, + "io.k8s.api.core.v1.GlusterfsVolumeSource": { + "description": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", + "properties": { + "endpoints": { + "description": "endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "type": "string" + }, + "path": { + "description": "path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "type": "string" + }, + "readOnly": { + "description": "readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "type": "boolean" + } + }, + "required": [ + "endpoints", + "path" + ], + "type": "object" + }, + "io.k8s.api.core.v1.HTTPGetAction": { + "description": "HTTPGetAction describes an action based on HTTP Get requests.", + "properties": { + "host": { + "description": "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead.", + "type": "string" + }, + "httpHeaders": { + "description": "Custom headers to set in the request. HTTP allows repeated headers.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.HTTPHeader" + }, + "type": "array" + }, + "path": { + "description": "Path to access on the HTTP server.", + "type": "string" + }, + "port": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString", + "description": "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + }, + "scheme": { + "description": "Scheme to use for connecting to the host. 
Defaults to HTTP.", + "type": "string" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "io.k8s.api.core.v1.HTTPHeader": { + "description": "HTTPHeader describes a custom header to be used in HTTP probes", + "properties": { + "name": { + "description": "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header.", + "type": "string" + }, + "value": { + "description": "The header field value", + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "io.k8s.api.core.v1.HostAlias": { + "description": "HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.", + "properties": { + "hostnames": { + "description": "Hostnames for the above IP address.", + "items": { + "type": "string" + }, + "type": "array" + }, + "ip": { + "description": "IP address of the host file entry.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.HostIP": { + "description": "HostIP represents a single IP address allocated to the host.", + "properties": { + "ip": { + "description": "IP is the IP address assigned to the host", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.HostPathVolumeSource": { + "description": "Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling.", + "properties": { + "path": { + "description": "path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + "type": "string" + }, + "type": { + "description": "type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + "type": "string" + } + }, + "required": [ + "path" + ], + "type": "object" + }, + "io.k8s.api.core.v1.ISCSIPersistentVolumeSource": { + "description": "ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.", + "properties": { + "chapAuthDiscovery": { + "description": "chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication", + "type": "boolean" + }, + "chapAuthSession": { + "description": "chapAuthSession defines whether support iSCSI Session CHAP authentication", + "type": "boolean" + }, + "fsType": { + "description": "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi", + "type": "string" + }, + "initiatorName": { + "description": "initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface \u003ctarget portal\u003e:\u003cvolume name\u003e will be created for the connection.", + "type": "string" + }, + "iqn": { + "description": "iqn is Target iSCSI Qualified Name.", + "type": "string" + }, + "iscsiInterface": { + "description": "iscsiInterface is the interface Name that uses an iSCSI transport. 
Defaults to 'default' (tcp).", + "type": "string" + }, + "lun": { + "description": "lun is iSCSI Target Lun number.", + "format": "int32", + "type": "integer" + }, + "portals": { + "description": "portals is the iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "items": { + "type": "string" + }, + "type": "array" + }, + "readOnly": { + "description": "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", + "type": "boolean" + }, + "secretRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretReference", + "description": "secretRef is the CHAP Secret for iSCSI target and initiator authentication" + }, + "targetPortal": { + "description": "targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "type": "string" + } + }, + "required": [ + "targetPortal", + "iqn", + "lun" + ], + "type": "object" + }, + "io.k8s.api.core.v1.ISCSIVolumeSource": { + "description": "Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.", + "properties": { + "chapAuthDiscovery": { + "description": "chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication", + "type": "boolean" + }, + "chapAuthSession": { + "description": "chapAuthSession defines whether support iSCSI Session CHAP authentication", + "type": "boolean" + }, + "fsType": { + "description": "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi", + "type": "string" + }, + "initiatorName": { + "description": "initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface \u003ctarget portal\u003e:\u003cvolume name\u003e will be created for the connection.", + "type": "string" + }, + "iqn": { + "description": "iqn is the target iSCSI Qualified Name.", + "type": "string" + }, + "iscsiInterface": { + "description": "iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).", + "type": "string" + }, + "lun": { + "description": "lun represents iSCSI Target Lun number.", + "format": "int32", + "type": "integer" + }, + "portals": { + "description": "portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "items": { + "type": "string" + }, + "type": "array" + }, + "readOnly": { + "description": "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", + "type": "boolean" + }, + "secretRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference", + "description": "secretRef is the CHAP Secret for iSCSI target and initiator authentication" + }, + "targetPortal": { + "description": "targetPortal is iSCSI Target Portal. 
The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "type": "string" + } + }, + "required": [ + "targetPortal", + "iqn", + "lun" + ], + "type": "object" + }, + "io.k8s.api.core.v1.KeyToPath": { + "description": "Maps a string key to a path within a volume.", + "properties": { + "key": { + "description": "key is the key to project.", + "type": "string" + }, + "mode": { + "description": "mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "format": "int32", + "type": "integer" + }, + "path": { + "description": "path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.", + "type": "string" + } + }, + "required": [ + "key", + "path" + ], + "type": "object" + }, + "io.k8s.api.core.v1.Lifecycle": { + "description": "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.", + "properties": { + "postStart": { + "$ref": "#/definitions/io.k8s.api.core.v1.LifecycleHandler", + "description": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks" + }, + "preStop": { + "$ref": "#/definitions/io.k8s.api.core.v1.LifecycleHandler", + "description": "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.LifecycleHandler": { + "description": "LifecycleHandler defines a specific action that should be taken in a lifecycle hook. One and only one of the fields, except TCPSocket must be specified.", + "properties": { + "exec": { + "$ref": "#/definitions/io.k8s.api.core.v1.ExecAction", + "description": "Exec specifies the action to take." + }, + "httpGet": { + "$ref": "#/definitions/io.k8s.api.core.v1.HTTPGetAction", + "description": "HTTPGet specifies the http request to perform." + }, + "sleep": { + "$ref": "#/definitions/io.k8s.api.core.v1.SleepAction", + "description": "Sleep represents the duration that the container should sleep before being terminated." 
+ }, + "tcpSocket": { + "$ref": "#/definitions/io.k8s.api.core.v1.TCPSocketAction", + "description": "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and kept for the backward compatibility. There are no validation of this field and lifecycle hooks will fail in runtime when tcp handler is specified." + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.LimitRange": { + "description": "LimitRange sets resource usage limits for each kind of resource in a Namespace.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.core.v1.LimitRangeSpec", + "description": "Spec defines the limits enforced. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "LimitRange", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.LimitRangeItem": { + "description": "LimitRangeItem defines a min/max usage limit for any resource that matches on kind.", + "properties": { + "default": { + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + }, + "description": "Default resource requirement limit value by resource name if resource limit is omitted.", + "type": "object" + }, + "defaultRequest": { + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + }, + "description": "DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.", + "type": "object" + }, + "max": { + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + }, + "description": "Max usage constraints on this kind by resource name.", + "type": "object" + }, + "maxLimitRequestRatio": { + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + }, + "description": "MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.", + "type": "object" + }, + "min": { + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + }, + "description": "Min usage constraints on this kind by resource name.", + "type": "object" + }, + "type": { + "description": "Type of resource that this limit applies to.", + "type": "string" + } + }, + "required": [ + "type" + ], + "type": "object" + }, + 
"io.k8s.api.core.v1.LimitRangeList": { + "description": "LimitRangeList is a list of LimitRange items.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is a list of LimitRange objects. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.LimitRange" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "LimitRangeList", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.LimitRangeSpec": { + "description": "LimitRangeSpec defines a min/max usage limit for resources that match on kind.", + "properties": { + "limits": { + "description": "Limits is the list of LimitRangeItem objects that are enforced.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.LimitRangeItem" + }, + "type": "array" + } + }, + "required": [ + "limits" + ], + "type": "object" + }, + "io.k8s.api.core.v1.LoadBalancerIngress": { + "description": "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.", + "properties": { + "hostname": { + "description": "Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers)", + "type": "string" + }, + "ip": { + "description": "IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)", + "type": "string" + }, + "ipMode": { + "description": "IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified. Setting this to \"VIP\" indicates that traffic is delivered to the node with the destination set to the load-balancer's IP and port. Setting this to \"Proxy\" indicates that traffic is delivered to the node or pod with the destination set to the node's IP and node port or the pod's IP and port. Service implementations may use this information to adjust traffic routing.", + "type": "string" + }, + "ports": { + "description": "Ports is a list of records of service ports If used, every port defined in the service should have an entry in it", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PortStatus" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.LoadBalancerStatus": { + "description": "LoadBalancerStatus represents the status of a load-balancer.", + "properties": { + "ingress": { + "description": "Ingress is a list containing ingress points for the load-balancer. 
Traffic intended for the service should be sent to these ingress points.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.LoadBalancerIngress" + }, + "type": "array" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.LocalObjectReference": { + "description": "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.", + "properties": { + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": "string" + } + }, + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.LocalVolumeSource": { + "description": "Local represents directly-attached storage with node affinity (Beta feature)", + "properties": { + "fsType": { + "description": "fsType is the filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default value is to auto-select a filesystem if unspecified.", + "type": "string" + }, + "path": { + "description": "path of the full path to the volume on the node. It can be either a directory or block device (disk, partition, ...).", + "type": "string" + } + }, + "required": [ + "path" + ], + "type": "object" + }, + "io.k8s.api.core.v1.ModifyVolumeStatus": { + "description": "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation", + "properties": { + "status": { + "description": "status is the status of the ControllerModifyVolume operation. It can be in any of following states:\n - Pending\n Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as\n the specified VolumeAttributesClass not existing.\n - InProgress\n InProgress indicates that the volume is being modified.\n - Infeasible\n Infeasible indicates that the request has been rejected as invalid by the CSI driver. To\n\t resolve the error, a valid VolumeAttributesClass needs to be specified.\nNote: New statuses can be added in the future. Consumers should check for unknown statuses and fail appropriately.", + "type": "string" + }, + "targetVolumeAttributesClassName": { + "description": "targetVolumeAttributesClassName is the name of the VolumeAttributesClass the PVC currently being reconciled", + "type": "string" + } + }, + "required": [ + "status" + ], + "type": "object" + }, + "io.k8s.api.core.v1.NFSVolumeSource": { + "description": "Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling.", + "properties": { + "path": { + "description": "path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "type": "string" + }, + "readOnly": { + "description": "readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "type": "boolean" + }, + "server": { + "description": "server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "type": "string" + } + }, + "required": [ + "server", + "path" + ], + "type": "object" + }, + "io.k8s.api.core.v1.Namespace": { + "description": "Namespace provides a scope for Names. 
Use of multiple namespaces is optional.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.core.v1.NamespaceSpec", + "description": "Spec defines the behavior of the Namespace. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + }, + "status": { + "$ref": "#/definitions/io.k8s.api.core.v1.NamespaceStatus", + "description": "Status describes the current status of a Namespace. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "Namespace", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.NamespaceCondition": { + "description": "NamespaceCondition contains details about state of namespace.", + "properties": { + "lastTransitionTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "message": { + "type": "string" + }, + "reason": { + "type": "string" + }, + "status": { + "description": "Status of the condition, one of True, False, Unknown.", + "type": "string" + }, + "type": { + "description": "Type of namespace controller condition.", + "type": "string" + } + }, + "required": [ + "type", + "status" + ], + "type": "object" + }, + "io.k8s.api.core.v1.NamespaceList": { + "description": "NamespaceList is a list of Namespaces.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is the list of Namespace objects in the list. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Namespace" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "NamespaceList", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.NamespaceSpec": { + "description": "NamespaceSpec describes the attributes on a Namespace.", + "properties": { + "finalizers": { + "description": "Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.NamespaceStatus": { + "description": "NamespaceStatus is information about the current status of a Namespace.", + "properties": { + "conditions": { + "description": "Represents the latest available observations of a namespace's current state.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.NamespaceCondition" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "phase": { + "description": "Phase is the current lifecycle phase of the namespace. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.Node": { + "description": "Node is a worker node in Kubernetes. Each node will have a unique identifier in the cache (i.e. in etcd).", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.core.v1.NodeSpec", + "description": "Spec defines the behavior of a node. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + }, + "status": { + "$ref": "#/definitions/io.k8s.api.core.v1.NodeStatus", + "description": "Most recently observed status of the node. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "Node", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.NodeAddress": { + "description": "NodeAddress contains information for the node's address.", + "properties": { + "address": { + "description": "The node address.", + "type": "string" + }, + "type": { + "description": "Node address type, one of Hostname, ExternalIP or InternalIP.", + "type": "string" + } + }, + "required": [ + "type", + "address" + ], + "type": "object" + }, + "io.k8s.api.core.v1.NodeAffinity": { + "description": "Node affinity is a group of node affinity scheduling rules.", + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "description": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PreferredSchedulingTerm" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelector", + "description": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node." + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.NodeCondition": { + "description": "NodeCondition contains condition information for a node.", + "properties": { + "lastHeartbeatTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "Last time we got an update on a given condition." + }, + "lastTransitionTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "Last time the condition transit from one status to another." + }, + "message": { + "description": "Human readable message indicating details about last transition.", + "type": "string" + }, + "reason": { + "description": "(brief) reason for the condition's last transition.", + "type": "string" + }, + "status": { + "description": "Status of the condition, one of True, False, Unknown.", + "type": "string" + }, + "type": { + "description": "Type of node condition.", + "type": "string" + } + }, + "required": [ + "type", + "status" + ], + "type": "object" + }, + "io.k8s.api.core.v1.NodeConfigSource": { + "description": "NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. 
This API is deprecated since 1.22", + "properties": { + "configMap": { + "$ref": "#/definitions/io.k8s.api.core.v1.ConfigMapNodeConfigSource", + "description": "ConfigMap is a reference to a Node's ConfigMap" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.NodeConfigStatus": { + "description": "NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.", + "properties": { + "active": { + "$ref": "#/definitions/io.k8s.api.core.v1.NodeConfigSource", + "description": "Active reports the checkpointed config the node is actively using. Active will represent either the current version of the Assigned config, or the current LastKnownGood config, depending on whether attempting to use the Assigned config results in an error." + }, + "assigned": { + "$ref": "#/definitions/io.k8s.api.core.v1.NodeConfigSource", + "description": "Assigned reports the checkpointed config the node will try to use. When Node.Spec.ConfigSource is updated, the node checkpoints the associated config payload to local disk, along with a record indicating intended config. The node refers to this record to choose its config checkpoint, and reports this record in Assigned. Assigned only updates in the status after the record has been checkpointed to disk. When the Kubelet is restarted, it tries to make the Assigned config the Active config by loading and validating the checkpointed payload identified by Assigned." + }, + "error": { + "description": "Error describes any problems reconciling the Spec.ConfigSource to the Active config. Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting to load or validate the Assigned config, etc. Errors may occur at different points while syncing config. Earlier errors (e.g. download or checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error by fixing the config assigned in Spec.ConfigSource. You can find additional information for debugging by searching the error message in the Kubelet log. Error is a human-readable description of the error state; machines can check whether or not Error is empty, but should not rely on the stability of the Error text across Kubelet versions.", + "type": "string" + }, + "lastKnownGood": { + "$ref": "#/definitions/io.k8s.api.core.v1.NodeConfigSource", + "description": "LastKnownGood reports the checkpointed config the node will fall back to when it encounters an error attempting to use the Assigned config. The Assigned config becomes the LastKnownGood config when the node determines that the Assigned config is stable and correct. This is currently implemented as a 10-minute soak period starting when the local record of Assigned config is updated. If the Assigned config is Active at the end of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil, because the local default config is always assumed good. You should not make assumptions about the node's method of determining config stability and correctness, as this may change or become configurable in the future." 
+ } + }, + "type": "object" + }, + "io.k8s.api.core.v1.NodeDaemonEndpoints": { + "description": "NodeDaemonEndpoints lists ports opened by daemons running on the Node.", + "properties": { + "kubeletEndpoint": { + "$ref": "#/definitions/io.k8s.api.core.v1.DaemonEndpoint", + "description": "Endpoint on which Kubelet is listening." + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.NodeList": { + "description": "NodeList is the whole list of all Nodes which have been registered with master.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of nodes", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Node" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "NodeList", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.NodeSelector": { + "description": "A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.", + "properties": { + "nodeSelectorTerms": { + "description": "Required. A list of node selector terms. The terms are ORed.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelectorTerm" + }, + "type": "array" + } + }, + "required": [ + "nodeSelectorTerms" + ], + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.NodeSelectorRequirement": { + "description": "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", + "properties": { + "key": { + "description": "The label key that the selector applies to.", + "type": "string" + }, + "operator": { + "description": "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.", + "type": "string" + }, + "values": { + "description": "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "io.k8s.api.core.v1.NodeSelectorTerm": { + "description": "A null or empty node selector term matches no objects. The requirements of them are ANDed. 
The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.", + "properties": { + "matchExpressions": { + "description": "A list of node selector requirements by node's labels.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelectorRequirement" + }, + "type": "array" + }, + "matchFields": { + "description": "A list of node selector requirements by node's fields.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelectorRequirement" + }, + "type": "array" + } + }, + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.NodeSpec": { + "description": "NodeSpec describes the attributes that a node is created with.", + "properties": { + "configSource": { + "$ref": "#/definitions/io.k8s.api.core.v1.NodeConfigSource", + "description": "Deprecated: Previously used to specify the source of the node's configuration for the DynamicKubeletConfig feature. This feature is removed." + }, + "externalID": { + "description": "Deprecated. Not all kubelets will set this field. Remove field after 1.13. see: https://issues.k8s.io/61966", + "type": "string" + }, + "podCIDR": { + "description": "PodCIDR represents the pod IP range assigned to the node.", + "type": "string" + }, + "podCIDRs": { + "description": "podCIDRs represents the IP ranges assigned to the node for usage by Pods on that node. If this field is specified, the 0th entry must match the podCIDR field. It may contain at most 1 value for each of IPv4 and IPv6.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-patch-strategy": "merge" + }, + "providerID": { + "description": "ID of the node assigned by the cloud provider in the format: \u003cProviderName\u003e://\u003cProviderSpecificNodeID\u003e", + "type": "string" + }, + "taints": { + "description": "If specified, the node's taints.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Taint" + }, + "type": "array" + }, + "unschedulable": { + "description": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration", + "type": "boolean" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.NodeStatus": { + "description": "NodeStatus is information about the current status of a node.", + "properties": { + "addresses": { + "description": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See https://pr.k8s.io/79391 for an example. Consumers should assume that addresses can change during the lifetime of a Node. However, there are some exceptions where this may not be possible, such as Pods that inherit a Node's address in its own status or consumers of the downward API (status.hostIP).", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.NodeAddress" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "allocatable": { + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + }, + "description": "Allocatable represents the resources of a node that are available for scheduling. 
Defaults to Capacity.", + "type": "object" + }, + "capacity": { + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + }, + "description": "Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity", + "type": "object" + }, + "conditions": { + "description": "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/concepts/nodes/node/#condition", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.NodeCondition" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "config": { + "$ref": "#/definitions/io.k8s.api.core.v1.NodeConfigStatus", + "description": "Status of the config assigned to the node via the dynamic Kubelet config feature." + }, + "daemonEndpoints": { + "$ref": "#/definitions/io.k8s.api.core.v1.NodeDaemonEndpoints", + "description": "Endpoints of daemons running on the Node." + }, + "images": { + "description": "List of container images on this node", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerImage" + }, + "type": "array" + }, + "nodeInfo": { + "$ref": "#/definitions/io.k8s.api.core.v1.NodeSystemInfo", + "description": "Set of ids/uuids to uniquely identify the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#info" + }, + "phase": { + "description": "NodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.", + "type": "string" + }, + "volumesAttached": { + "description": "List of volumes that are attached to the node.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.AttachedVolume" + }, + "type": "array" + }, + "volumesInUse": { + "description": "List of attachable volumes in use (mounted) by the node.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.NodeSystemInfo": { + "description": "NodeSystemInfo is a set of ids/uuids to uniquely identify the node.", + "properties": { + "architecture": { + "description": "The Architecture reported by the node", + "type": "string" + }, + "bootID": { + "description": "Boot ID reported by the node.", + "type": "string" + }, + "containerRuntimeVersion": { + "description": "ContainerRuntime Version reported by the node through runtime remote API (e.g. containerd://1.4.2).", + "type": "string" + }, + "kernelVersion": { + "description": "Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).", + "type": "string" + }, + "kubeProxyVersion": { + "description": "KubeProxy Version reported by the node.", + "type": "string" + }, + "kubeletVersion": { + "description": "Kubelet Version reported by the node.", + "type": "string" + }, + "machineID": { + "description": "MachineID reported by the node. For unique machine identification in the cluster this field is preferred. Learn more from man(5) machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html", + "type": "string" + }, + "operatingSystem": { + "description": "The Operating System reported by the node", + "type": "string" + }, + "osImage": { + "description": "OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).", + "type": "string" + }, + "systemUUID": { + "description": "SystemUUID reported by the node. For unique machine identification MachineID is preferred. 
This field is specific to Red Hat hosts https://access.redhat.com/documentation/en-us/red_hat_subscription_management/1/html/rhsm/uuid", + "type": "string" + } + }, + "required": [ + "machineID", + "systemUUID", + "bootID", + "kernelVersion", + "osImage", + "containerRuntimeVersion", + "kubeletVersion", + "kubeProxyVersion", + "operatingSystem", + "architecture" + ], + "type": "object" + }, + "io.k8s.api.core.v1.ObjectFieldSelector": { + "description": "ObjectFieldSelector selects an APIVersioned field of an object.", + "properties": { + "apiVersion": { + "description": "Version of the schema the FieldPath is written in terms of, defaults to \"v1\".", + "type": "string" + }, + "fieldPath": { + "description": "Path of the field to select in the specified API version.", + "type": "string" + } + }, + "required": [ + "fieldPath" + ], + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.ObjectReference": { + "description": "ObjectReference contains enough information to let you inspect or modify the referred object.", + "properties": { + "apiVersion": { + "description": "API version of the referent.", + "type": "string" + }, + "fieldPath": { + "description": "If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered the event) or if no container name is specified \"spec.containers[2]\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.", + "type": "string" + }, + "kind": { + "description": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": "string" + }, + "namespace": { + "description": "Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/", + "type": "string" + }, + "resourceVersion": { + "description": "Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", + "type": "string" + }, + "uid": { + "description": "UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids", + "type": "string" + } + }, + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.PersistentVolume": { + "description": "PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeSpec", + "description": "spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes" + }, + "status": { + "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeStatus", + "description": "status represents the current information/status for the persistent volume. Populated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "PersistentVolume", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.PersistentVolumeClaim": { + "description": "PersistentVolumeClaim is a user's request for and claim to a persistent volume", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimSpec", + "description": "spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims" + }, + "status": { + "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimStatus", + "description": "status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "PersistentVolumeClaim", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.PersistentVolumeClaimCondition": { + "description": "PersistentVolumeClaimCondition contains details about state of pvc", + "properties": { + "lastProbeTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "lastProbeTime is the time we probed the condition." 
+ }, + "lastTransitionTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "lastTransitionTime is the time the condition transitioned from one status to another." + }, + "message": { + "description": "message is the human-readable message indicating details about last transition.", + "type": "string" + }, + "reason": { + "description": "reason is a unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \"ResizeStarted\" that means the underlying persistent volume is being resized.", + "type": "string" + }, + "status": { + "type": "string" + }, + "type": { + "type": "string" + } + }, + "required": [ + "type", + "status" + ], + "type": "object" + }, + "io.k8s.api.core.v1.PersistentVolumeClaimList": { + "description": "PersistentVolumeClaimList is a list of PersistentVolumeClaim items.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is a list of persistent volume claims. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeClaim" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "PersistentVolumeClaimList", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.PersistentVolumeClaimSpec": { + "description": "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes", + "properties": { + "accessModes": { + "description": "accessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", + "items": { + "type": "string" + }, + "type": "array" + }, + "dataSource": { + "$ref": "#/definitions/io.k8s.api.core.v1.TypedLocalObjectReference", + "description": "dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource." 
+ }, + "dataSourceRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.TypedObjectReference", + "description": "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled." + }, + "resources": { + "$ref": "#/definitions/io.k8s.api.core.v1.VolumeResourceRequirements", + "description": "resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources" + }, + "selector": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", + "description": "selector is a label query over volumes to consider for binding." + }, + "storageClassName": { + "description": "storageClassName is the name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", + "type": "string" + }, + "volumeAttributesClassName": { + "description": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource exists. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.", + "type": "string" + }, + "volumeMode": { + "description": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.", + "type": "string" + }, + "volumeName": { + "description": "volumeName is the binding reference to the PersistentVolume backing this claim.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.PersistentVolumeClaimStatus": { + "description": "PersistentVolumeClaimStatus is the current status of a persistent volume claim.", + "properties": { + "accessModes": { + "description": "accessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", + "items": { + "type": "string" + }, + "type": "array" + }, + "allocatedResourceStatuses": { + "additionalProperties": { + "type": "string" + }, + "description": "allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nClaimResourceStatus can be in any of following states:\n\t- ControllerResizeInProgress:\n\t\tState set when resize controller starts resizing the volume in control-plane.\n\t- ControllerResizeFailed:\n\t\tState set when resize has failed in resize controller with a terminal error.\n\t- NodeResizePending:\n\t\tState set when resize controller has finished resizing the volume but further resizing of\n\t\tvolume is needed on the node.\n\t- NodeResizeInProgress:\n\t\tState set when kubelet starts resizing the volume.\n\t- NodeResizeFailed:\n\t\tState set when resizing has failed in kubelet with a terminal error. Transient errors don't set\n\t\tNodeResizeFailed.\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\n\t- pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeFailed\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizePending\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeFailed\"\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\n\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", + "type": "object", + "x-kubernetes-map-type": "granular" + }, + "allocatedResources": { + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + }, + "description": "allocatedResources tracks the resources allocated to a PVC including its capacity. 
Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\n\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", + "type": "object" + }, + "capacity": { + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + }, + "description": "capacity represents the actual resources of the underlying volume.", + "type": "object" + }, + "conditions": { + "description": "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimCondition" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "currentVolumeAttributesClassName": { + "description": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. When unset, there is no VolumeAttributeClass applied to this PersistentVolumeClaim. This is an alpha field and requires enabling VolumeAttributesClass feature.", + "type": "string" + }, + "modifyVolumeStatus": { + "$ref": "#/definitions/io.k8s.api.core.v1.ModifyVolumeStatus", + "description": "ModifyVolumeStatus represents the status object of ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted. This is an alpha field and requires enabling VolumeAttributesClass feature." + }, + "phase": { + "description": "phase represents the current phase of PersistentVolumeClaim.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.PersistentVolumeClaimTemplate": { + "description": "PersistentVolumeClaimTemplate is used to produce PersistentVolumeClaim objects as part of an EphemeralVolumeSource.", + "properties": { + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation." + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimSpec", + "description": "The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template.
The same fields as in a PersistentVolumeClaim are also valid here." + } + }, + "required": [ + "spec" + ], + "type": "object" + }, + "io.k8s.api.core.v1.PersistentVolumeClaimVolumeSource": { + "description": "PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).", + "properties": { + "claimName": { + "description": "claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + "type": "string" + }, + "readOnly": { + "description": "readOnly Will force the ReadOnly setting in VolumeMounts. Default false.", + "type": "boolean" + } + }, + "required": [ + "claimName" + ], + "type": "object" + }, + "io.k8s.api.core.v1.PersistentVolumeList": { + "description": "PersistentVolumeList is a list of PersistentVolume items.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is a list of persistent volumes. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolume" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "PersistentVolumeList", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.PersistentVolumeSpec": { + "description": "PersistentVolumeSpec is the specification of a persistent volume.", + "properties": { + "accessModes": { + "description": "accessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes", + "items": { + "type": "string" + }, + "type": "array" + }, + "awsElasticBlockStore": { + "$ref": "#/definitions/io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource", + "description": "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore" + }, + "azureDisk": { + "$ref": "#/definitions/io.k8s.api.core.v1.AzureDiskVolumeSource", + "description": "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod." 
+ }, + "azureFile": { + "$ref": "#/definitions/io.k8s.api.core.v1.AzureFilePersistentVolumeSource", + "description": "azureFile represents an Azure File Service mount on the host and bind mount to the pod." + }, + "capacity": { + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + }, + "description": "capacity is the description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity", + "type": "object" + }, + "cephfs": { + "$ref": "#/definitions/io.k8s.api.core.v1.CephFSPersistentVolumeSource", + "description": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime" + }, + "cinder": { + "$ref": "#/definitions/io.k8s.api.core.v1.CinderPersistentVolumeSource", + "description": "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md" + }, + "claimRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference", + "description": "claimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. Expected to be non-nil when bound. claim.VolumeName is the authoritative bind between PV and PVC. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding", + "x-kubernetes-map-type": "granular" + }, + "csi": { + "$ref": "#/definitions/io.k8s.api.core.v1.CSIPersistentVolumeSource", + "description": "csi represents storage that is handled by an external CSI driver (Beta feature)." + }, + "fc": { + "$ref": "#/definitions/io.k8s.api.core.v1.FCVolumeSource", + "description": "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod." + }, + "flexVolume": { + "$ref": "#/definitions/io.k8s.api.core.v1.FlexPersistentVolumeSource", + "description": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin." + }, + "flocker": { + "$ref": "#/definitions/io.k8s.api.core.v1.FlockerVolumeSource", + "description": "flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running" + }, + "gcePersistentDisk": { + "$ref": "#/definitions/io.k8s.api.core.v1.GCEPersistentDiskVolumeSource", + "description": "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk" + }, + "glusterfs": { + "$ref": "#/definitions/io.k8s.api.core.v1.GlusterfsPersistentVolumeSource", + "description": "glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: https://examples.k8s.io/volumes/glusterfs/README.md" + }, + "hostPath": { + "$ref": "#/definitions/io.k8s.api.core.v1.HostPathVolumeSource", + "description": "hostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath" + }, + "iscsi": { + "$ref": "#/definitions/io.k8s.api.core.v1.ISCSIPersistentVolumeSource", + "description": "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin." + }, + "local": { + "$ref": "#/definitions/io.k8s.api.core.v1.LocalVolumeSource", + "description": "local represents directly-attached storage with node affinity" + }, + "mountOptions": { + "description": "mountOptions is the list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options", + "items": { + "type": "string" + }, + "type": "array" + }, + "nfs": { + "$ref": "#/definitions/io.k8s.api.core.v1.NFSVolumeSource", + "description": "nfs represents an NFS mount on the host. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" + }, + "nodeAffinity": { + "$ref": "#/definitions/io.k8s.api.core.v1.VolumeNodeAffinity", + "description": "nodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume." + }, + "persistentVolumeReclaimPolicy": { + "description": "persistentVolumeReclaimPolicy defines what happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming", + "type": "string" + }, + "photonPersistentDisk": { + "$ref": "#/definitions/io.k8s.api.core.v1.PhotonPersistentDiskVolumeSource", + "description": "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine" + }, + "portworxVolume": { + "$ref": "#/definitions/io.k8s.api.core.v1.PortworxVolumeSource", + "description": "portworxVolume represents a portworx volume attached and mounted on kubelets host machine" + }, + "quobyte": { + "$ref": "#/definitions/io.k8s.api.core.v1.QuobyteVolumeSource", + "description": "quobyte represents a Quobyte mount on the host that shares a pod's lifetime" + }, + "rbd": { + "$ref": "#/definitions/io.k8s.api.core.v1.RBDPersistentVolumeSource", + "description": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md" + }, + "scaleIO": { + "$ref": "#/definitions/io.k8s.api.core.v1.ScaleIOPersistentVolumeSource", + "description": "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes." + }, + "storageClassName": { + "description": "storageClassName is the name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass.", + "type": "string" + }, + "storageos": { + "$ref": "#/definitions/io.k8s.api.core.v1.StorageOSPersistentVolumeSource", + "description": "storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://examples.k8s.io/volumes/storageos/README.md" + }, + "volumeAttributesClassName": { + "description": "Name of VolumeAttributesClass to which this persistent volume belongs. 
Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is an alpha field and requires enabling VolumeAttributesClass feature.", + "type": "string" + }, + "volumeMode": { + "description": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec.", + "type": "string" + }, + "vsphereVolume": { + "$ref": "#/definitions/io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource", + "description": "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.PersistentVolumeStatus": { + "description": "PersistentVolumeStatus is the current status of a persistent volume.", + "properties": { + "lastPhaseTransitionTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time every time a volume phase transitions. This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default)." + }, + "message": { + "description": "message is a human-readable message indicating details about why the volume is in this state.", + "type": "string" + }, + "phase": { + "description": "phase indicates if a volume is available, bound to a claim, or released by a claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase", + "type": "string" + }, + "reason": { + "description": "reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.PhotonPersistentDiskVolumeSource": { + "description": "Represents a Photon Controller persistent disk resource.", + "properties": { + "fsType": { + "description": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "type": "string" + }, + "pdID": { + "description": "pdID is the ID that identifies Photon Controller persistent disk", + "type": "string" + } + }, + "required": [ + "pdID" + ], + "type": "object" + }, + "io.k8s.api.core.v1.Pod": { + "description": "Pod is a collection of containers that can run on a host. This resource is created by clients and scheduled onto hosts.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodSpec", + "description": "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + }, + "status": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodStatus", + "description": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "Pod", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.PodAffinity": { + "description": "Pod affinity is a group of inter pod affinity scheduling rules.", + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "description": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.WeightedPodAffinityTerm" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "description": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodAffinityTerm" + }, + "type": "array" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.PodAffinityTerm": { + "description": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key \u003ctopologyKey\u003e matches that of any node on which a pod of the set of pods is running", + "properties": { + "labelSelector": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", + "description": "A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods." + }, + "matchLabelKeys": { + "description": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. 
The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "mismatchLabelKeys": { + "description": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "namespaceSelector": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", + "description": "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces." + }, + "namespaces": { + "description": "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\".", + "items": { + "type": "string" + }, + "type": "array" + }, + "topologyKey": { + "description": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.", + "type": "string" + } + }, + "required": [ + "topologyKey" + ], + "type": "object" + }, + "io.k8s.api.core.v1.PodAntiAffinity": { + "description": "Pod anti affinity is a group of inter pod anti affinity scheduling rules.", + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "description": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.WeightedPodAffinityTerm" + }, + "type": "array" + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "description": "If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodAffinityTerm" + }, + "type": "array" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.PodCondition": { + "description": "PodCondition contains details for the current condition of this pod.", + "properties": { + "lastProbeTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "Last time we probed the condition." + }, + "lastTransitionTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "Last time the condition transitioned from one status to another." + }, + "message": { + "description": "Human-readable message indicating details about last transition.", + "type": "string" + }, + "reason": { + "description": "Unique, one-word, CamelCase reason for the condition's last transition.", + "type": "string" + }, + "status": { + "description": "Status is the status of the condition. Can be True, False, Unknown. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", + "type": "string" + }, + "type": { + "description": "Type is the type of the condition. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", + "type": "string" + } + }, + "required": [ + "type", + "status" + ], + "type": "object" + }, + "io.k8s.api.core.v1.PodDNSConfig": { + "description": "PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy.", + "properties": { + "nameservers": { + "description": "A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed.", + "items": { + "type": "string" + }, + "type": "array" + }, + "options": { + "description": "A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodDNSConfigOption" + }, + "type": "array" + }, + "searches": { + "description": "A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. 
Duplicated search paths will be removed.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.PodDNSConfigOption": { + "description": "PodDNSConfigOption defines DNS resolver options of a pod.", + "properties": { + "name": { + "description": "Required.", + "type": "string" + }, + "value": { + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.PodIP": { + "description": "PodIP represents a single IP address allocated to the pod.", + "properties": { + "ip": { + "description": "IP is the IP address assigned to the pod", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.PodList": { + "description": "PodList is a list of Pods.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of pods. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Pod" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "PodList", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.PodOS": { + "description": "PodOS defines the OS parameters of a pod.", + "properties": { + "name": { + "description": "Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "io.k8s.api.core.v1.PodReadinessGate": { + "description": "PodReadinessGate contains the reference to a pod condition", + "properties": { + "conditionType": { + "description": "ConditionType refers to a condition in the pod's condition list with matching type.", + "type": "string" + } + }, + "required": [ + "conditionType" + ], + "type": "object" + }, + "io.k8s.api.core.v1.PodResourceClaim": { + "description": "PodResourceClaim references exactly one ResourceClaim through a ClaimSource. It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name.", + "properties": { + "name": { + "description": "Name uniquely identifies this resource claim inside the pod. 
This must be a DNS_LABEL.", + "type": "string" + }, + "source": { + "$ref": "#/definitions/io.k8s.api.core.v1.ClaimSource", + "description": "Source describes where to find the ResourceClaim." + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "io.k8s.api.core.v1.PodResourceClaimStatus": { + "description": "PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim which references a ResourceClaimTemplate. It stores the generated name for the corresponding ResourceClaim.", + "properties": { + "name": { + "description": "Name uniquely identifies this resource claim inside the pod. This must match the name of an entry in pod.spec.resourceClaims, which implies that the string must be a DNS_LABEL.", + "type": "string" + }, + "resourceClaimName": { + "description": "ResourceClaimName is the name of the ResourceClaim that was generated for the Pod in the namespace of the Pod. If this is unset, then generating a ResourceClaim was not necessary. The pod.spec.resourceClaims entry can be ignored in this case.", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "io.k8s.api.core.v1.PodSchedulingGate": { + "description": "PodSchedulingGate is associated to a Pod to guard its scheduling.", + "properties": { + "name": { + "description": "Name of the scheduling gate. Each scheduling gate must have a unique name field.", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "io.k8s.api.core.v1.PodSecurityContext": { + "description": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.", + "properties": { + "fsGroup": { + "description": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\n\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.", + "format": "int64", + "type": "integer" + }, + "fsGroupChangePolicy": { + "description": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used. Note that this field cannot be set when spec.os.name is windows.", + "type": "string" + }, + "runAsGroup": { + "description": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", + "format": "int64", + "type": "integer" + }, + "runAsNonRoot": { + "description": "Indicates that the container must run as a non-root user.
If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "type": "boolean" + }, + "runAsUser": { + "description": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", + "format": "int64", + "type": "integer" + }, + "seLinuxOptions": { + "$ref": "#/definitions/io.k8s.api.core.v1.SELinuxOptions", + "description": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows." + }, + "seccompProfile": { + "$ref": "#/definitions/io.k8s.api.core.v1.SeccompProfile", + "description": "The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows." + }, + "supplementalGroups": { + "description": "A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.", + "items": { + "format": "int64", + "type": "integer" + }, + "type": "array" + }, + "sysctls": { + "description": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Sysctl" + }, + "type": "array" + }, + "windowsOptions": { + "$ref": "#/definitions/io.k8s.api.core.v1.WindowsSecurityContextOptions", + "description": "The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux." + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.PodSpec": { + "description": "PodSpec is a description of a pod.", + "properties": { + "activeDeadlineSeconds": { + "description": "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. 
Value must be a positive integer.", + "format": "int64", + "type": "integer" + }, + "affinity": { + "$ref": "#/definitions/io.k8s.api.core.v1.Affinity", + "description": "If specified, the pod's scheduling constraints" + }, + "automountServiceAccountToken": { + "description": "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.", + "type": "boolean" + }, + "containers": { + "description": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Container" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "dnsConfig": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodDNSConfig", + "description": "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy." + }, + "dnsPolicy": { + "description": "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.", + "type": "string" + }, + "enableServiceLinks": { + "description": "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.", + "type": "boolean" + }, + "ephemeralContainers": { + "description": "List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.EphemeralContainer" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "hostAliases": { + "description": "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.HostAlias" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "ip", + "x-kubernetes-patch-strategy": "merge" + }, + "hostIPC": { + "description": "Use the host's ipc namespace. Optional: Default to false.", + "type": "boolean" + }, + "hostNetwork": { + "description": "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.", + "type": "boolean" + }, + "hostPID": { + "description": "Use the host's pid namespace. Optional: Default to false.", + "type": "boolean" + }, + "hostUsers": { + "description": "Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. 
Setting false is useful for mitigating container breakout vulnerabilities while still allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.", + "type": "boolean" + }, + "hostname": { + "description": "Specifies the hostname of the Pod. If not specified, the pod's hostname will be set to a system-defined value.", + "type": "string" + }, + "imagePullSecrets": { + "description": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "initContainers": { + "description": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Container" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "nodeName": { + "description": "NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.", + "type": "string" + }, + "nodeSelector": { + "additionalProperties": { + "type": "string" + }, + "description": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "os": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodOS", + "description": "Specifies the OS of the containers in the pod.
Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions\n\nIf the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup" + }, + "overhead": { + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + }, + "description": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md", + "type": "object" + }, + "preemptionPolicy": { + "description": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.", + "type": "string" + }, + "priority": { + "description": "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.", + "format": "int32", + "type": "integer" + }, + "priorityClassName": { + "description": "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", + "type": "string" + }, + "readinessGates": { + "description": "If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodReadinessGate" + }, + "type": "array" + }, + "resourceClaims": { + "description": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. 
The resources will be made available to those containers which consume them by name.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodResourceClaim" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys" + }, + "restartPolicy": { + "description": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy", + "type": "string" + }, + "runtimeClassName": { + "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", + "type": "string" + }, + "schedulerName": { + "description": "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.", + "type": "string" + }, + "schedulingGates": { + "description": "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\n\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodSchedulingGate" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "securityContext": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodSecurityContext", + "description": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field." + }, + "serviceAccount": { + "description": "DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.", + "type": "string" + }, + "serviceAccountName": { + "description": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", + "type": "string" + }, + "setHostnameAsFQDN": { + "description": "If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect.
Default to false.", + "type": "boolean" + }, + "shareProcessNamespace": { + "description": "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false.", + "type": "boolean" + }, + "subdomain": { + "description": "If specified, the fully qualified Pod hostname will be \"\u003chostname\u003e.\u003csubdomain\u003e.\u003cpod namespace\u003e.svc.\u003ccluster domain\u003e\". If not specified, the pod will not have a domainname at all.", + "type": "string" + }, + "terminationGracePeriodSeconds": { + "description": "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", + "format": "int64", + "type": "integer" + }, + "tolerations": { + "description": "If specified, the pod's tolerations.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Toleration" + }, + "type": "array" + }, + "topologySpreadConstraints": { + "description": "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.TopologySpreadConstraint" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "topologyKey", + "whenUnsatisfiable" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "topologyKey", + "x-kubernetes-patch-strategy": "merge" + }, + "volumes": { + "description": "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Volume" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys" + } + }, + "required": [ + "containers" + ], + "type": "object" + }, + "io.k8s.api.core.v1.PodStatus": { + "description": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.", + "properties": { + "conditions": { + "description": "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodCondition" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "containerStatuses": { + "description": "The list has one entry per container in the manifest. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerStatus" + }, + "type": "array" + }, + "ephemeralContainerStatuses": { + "description": "Status for any ephemeral containers that have run in this pod.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerStatus" + }, + "type": "array" + }, + "hostIP": { + "description": "hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet, which in turn means that HostIP will not be updated even if a node is assigned to the pod", + "type": "string" + }, + "hostIPs": { + "description": "hostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must match the hostIP field. This list is empty if the pod has not started yet. A pod can be assigned to a node that has a problem in kubelet, which in turn means that HostIPs will not be updated even if a node is assigned to this pod.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.HostIP" + }, + "type": "array", + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "ip", + "x-kubernetes-patch-strategy": "merge" + }, + "initContainerStatuses": { + "description": "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerStatus" + }, + "type": "array" + }, + "message": { + "description": "A human readable message indicating details about why the pod is in this condition.", + "type": "string" + }, + "nominatedNodeName": { + "description": "nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.", + "type": "string" + }, + "phase": { + "description": "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system.
Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase", + "type": "string" + }, + "podIP": { + "description": "podIP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.", + "type": "string" + }, + "podIPs": { + "description": "podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodIP" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "ip", + "x-kubernetes-patch-strategy": "merge" + }, + "qosClass": { + "description": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes", + "type": "string" + }, + "reason": { + "description": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'", + "type": "string" + }, + "resize": { + "description": "Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. Any changes to container resources will automatically set this to \"Proposed\"", + "type": "string" + }, + "resourceClaimStatuses": { + "description": "Status of resource claims.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodResourceClaimStatus" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys" + }, + "startTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod." + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.PodTemplate": { + "description": "PodTemplate describes a template for creating copies of a predefined pod.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "template": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodTemplateSpec", + "description": "Template defines the pods that will be created from this pod template. 
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "PodTemplate", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.PodTemplateList": { + "description": "PodTemplateList is a list of PodTemplates.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of pod templates", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodTemplate" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "PodTemplateList", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.PodTemplateSpec": { + "description": "PodTemplateSpec describes the data a pod should have when created from a template", + "properties": { + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodSpec", + "description": "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.PortStatus": { + "properties": { + "error": { + "description": "Error is to record the problem with the service port. The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.", + "type": "string" + }, + "port": { + "description": "Port is the port number of the service port of which status is recorded here", + "format": "int32", + "type": "integer" + }, + "protocol": { + "description": "Protocol is the protocol of the service port of which status is recorded here. The supported values are: \"TCP\", \"UDP\", \"SCTP\"", + "type": "string" + } + }, + "required": [ + "port", + "protocol" + ], + "type": "object" + }, + "io.k8s.api.core.v1.PortworxVolumeSource": { + "description": "PortworxVolumeSource represents a Portworx volume resource.", + "properties": { + "fsType": { + "description": "fsType represents the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". 
Implicitly inferred to be \"ext4\" if unspecified.", + "type": "string" + }, + "readOnly": { + "description": "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "type": "boolean" + }, + "volumeID": { + "description": "volumeID uniquely identifies a Portworx volume", + "type": "string" + } + }, + "required": [ + "volumeID" + ], + "type": "object" + }, + "io.k8s.api.core.v1.PreferredSchedulingTerm": { + "description": "An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).", + "properties": { + "preference": { + "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelectorTerm", + "description": "A node selector term, associated with the corresponding weight." + }, + "weight": { + "description": "Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.", + "format": "int32", + "type": "integer" + } + }, + "required": [ + "weight", + "preference" + ], + "type": "object" + }, + "io.k8s.api.core.v1.Probe": { + "description": "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.", + "properties": { + "exec": { + "$ref": "#/definitions/io.k8s.api.core.v1.ExecAction", + "description": "Exec specifies the action to take." + }, + "failureThreshold": { + "description": "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.", + "format": "int32", + "type": "integer" + }, + "grpc": { + "$ref": "#/definitions/io.k8s.api.core.v1.GRPCAction", + "description": "GRPC specifies an action involving a GRPC port." + }, + "httpGet": { + "$ref": "#/definitions/io.k8s.api.core.v1.HTTPGetAction", + "description": "HTTPGet specifies the http request to perform." + }, + "initialDelaySeconds": { + "description": "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "format": "int32", + "type": "integer" + }, + "periodSeconds": { + "description": "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.", + "format": "int32", + "type": "integer" + }, + "successThreshold": { + "description": "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.", + "format": "int32", + "type": "integer" + }, + "tcpSocket": { + "$ref": "#/definitions/io.k8s.api.core.v1.TCPSocketAction", + "description": "TCPSocket specifies an action involving a TCP port." + }, + "terminationGracePeriodSeconds": { + "description": "Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. 
spec.terminationGracePeriodSeconds is used if unset.", + "format": "int64", + "type": "integer" + }, + "timeoutSeconds": { + "description": "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.ProjectedVolumeSource": { + "description": "Represents a projected volume source", + "properties": { + "defaultMode": { + "description": "defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "format": "int32", + "type": "integer" + }, + "sources": { + "description": "sources is the list of volume projections", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.VolumeProjection" + }, + "type": "array" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.QuobyteVolumeSource": { + "description": "Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling.", + "properties": { + "group": { + "description": "group to map volume access to. Default is no group", + "type": "string" + }, + "readOnly": { + "description": "readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.", + "type": "boolean" + }, + "registry": { + "description": "registry represents a single or multiple Quobyte Registry services specified as a string of host:port pairs (multiple entries are separated with commas) which acts as the central registry for volumes", + "type": "string" + }, + "tenant": { + "description": "tenant owning the given Quobyte volume in the Backend. Used with dynamically provisioned Quobyte volumes, value is set by the plugin", + "type": "string" + }, + "user": { + "description": "user to map volume access to. Defaults to service account user", + "type": "string" + }, + "volume": { + "description": "volume is a string that references an already created Quobyte volume by name.", + "type": "string" + } + }, + "required": [ + "registry", + "volume" + ], + "type": "object" + }, + "io.k8s.api.core.v1.RBDPersistentVolumeSource": { + "description": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.", + "properties": { + "fsType": { + "description": "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd", + "type": "string" + }, + "image": { + "description": "image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "type": "string" + }, + "keyring": { + "description": "keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "type": "string" + }, + "monitors": { + "description": "monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "items": { + "type": "string" + }, + "type": "array" + }, + "pool": { + "description": "pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "type": "string" + }, + "readOnly": { + "description": "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "type": "boolean" + }, + "secretRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretReference", + "description": "secretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + }, + "user": { + "description": "user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "type": "string" + } + }, + "required": [ + "monitors", + "image" + ], + "type": "object" + }, + "io.k8s.api.core.v1.RBDVolumeSource": { + "description": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.", + "properties": { + "fsType": { + "description": "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd", + "type": "string" + }, + "image": { + "description": "image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "type": "string" + }, + "keyring": { + "description": "keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "type": "string" + }, + "monitors": { + "description": "monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "items": { + "type": "string" + }, + "type": "array" + }, + "pool": { + "description": "pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "type": "string" + }, + "readOnly": { + "description": "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "type": "boolean" + }, + "secretRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference", + "description": "secretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it" + }, + "user": { + "description": "user is the rados user name. Default is admin. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "type": "string" + } + }, + "required": [ + "monitors", + "image" + ], + "type": "object" + }, + "io.k8s.api.core.v1.ReplicationController": { + "description": "ReplicationController represents the configuration of a replication controller.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.core.v1.ReplicationControllerSpec", + "description": "Spec defines the specification of the desired behavior of the replication controller. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + }, + "status": { + "$ref": "#/definitions/io.k8s.api.core.v1.ReplicationControllerStatus", + "description": "Status is the most recently observed status of the replication controller. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "ReplicationController", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.ReplicationControllerCondition": { + "description": "ReplicationControllerCondition describes the state of a replication controller at a certain point.", + "properties": { + "lastTransitionTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "The last time the condition transitioned from one status to another." + }, + "message": { + "description": "A human readable message indicating details about the transition.", + "type": "string" + }, + "reason": { + "description": "The reason for the condition's last transition.", + "type": "string" + }, + "status": { + "description": "Status of the condition, one of True, False, Unknown.", + "type": "string" + }, + "type": { + "description": "Type of replication controller condition.", + "type": "string" + } + }, + "required": [ + "type", + "status" + ], + "type": "object" + }, + "io.k8s.api.core.v1.ReplicationControllerList": { + "description": "ReplicationControllerList is a collection of replication controllers.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of replication controllers. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ReplicationController" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "ReplicationControllerList", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.ReplicationControllerSpec": { + "description": "ReplicationControllerSpec is the specification of a replication controller.", + "properties": { + "minReadySeconds": { + "description": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "format": "int32", + "type": "integer" + }, + "replicas": { + "description": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller", + "format": "int32", + "type": "integer" + }, + "selector": { + "additionalProperties": { + "type": "string" + }, + "description": "Selector is a label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "template": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodTemplateSpec", + "description": "Template is the object that describes the pod that will be created if insufficient replicas are detected. This takes precedence over a TemplateRef. The only allowed template.spec.restartPolicy value is \"Always\". 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.ReplicationControllerStatus": { + "description": "ReplicationControllerStatus represents the current status of a replication controller.", + "properties": { + "availableReplicas": { + "description": "The number of available replicas (ready for at least minReadySeconds) for this replication controller.", + "format": "int32", + "type": "integer" + }, + "conditions": { + "description": "Represents the latest available observations of a replication controller's current state.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ReplicationControllerCondition" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "fullyLabeledReplicas": { + "description": "The number of pods that have labels matching the labels of the pod template of the replication controller.", + "format": "int32", + "type": "integer" + }, + "observedGeneration": { + "description": "ObservedGeneration reflects the generation of the most recently observed replication controller.", + "format": "int64", + "type": "integer" + }, + "readyReplicas": { + "description": "The number of ready replicas for this replication controller.", + "format": "int32", + "type": "integer" + }, + "replicas": { + "description": "Replicas is the most recently observed number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller", + "format": "int32", + "type": "integer" + } + }, + "required": [ + "replicas" + ], + "type": "object" + }, + "io.k8s.api.core.v1.ResourceClaim": { + "description": "ResourceClaim references one entry in PodSpec.ResourceClaims.", + "properties": { + "name": { + "description": "Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "io.k8s.api.core.v1.ResourceFieldSelector": { + "description": "ResourceFieldSelector represents container resources (cpu, memory) and their output format", + "properties": { + "containerName": { + "description": "Container name: required for volumes, optional for env vars", + "type": "string" + }, + "divisor": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity", + "description": "Specifies the output format of the exposed resources, defaults to \"1\"" + }, + "resource": { + "description": "Required: resource to select", + "type": "string" + } + }, + "required": [ + "resource" + ], + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.ResourceQuota": { + "description": "ResourceQuota sets aggregate quota restrictions enforced per namespace", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.core.v1.ResourceQuotaSpec", + "description": "Spec defines the desired quota. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + }, + "status": { + "$ref": "#/definitions/io.k8s.api.core.v1.ResourceQuotaStatus", + "description": "Status defines the actual enforced quota and its current usage. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "ResourceQuota", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.ResourceQuotaList": { + "description": "ResourceQuotaList is a list of ResourceQuota items.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is a list of ResourceQuota objects. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ResourceQuota" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "ResourceQuotaList", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.ResourceQuotaSpec": { + "description": "ResourceQuotaSpec defines the desired hard limits to enforce for Quota.", + "properties": { + "hard": { + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + }, + "description": "hard is the set of desired hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/", + "type": "object" + }, + "scopeSelector": { + "$ref": "#/definitions/io.k8s.api.core.v1.ScopeSelector", + "description": "scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota but expressed using ScopeSelectorOperator in combination with possible values. For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched." + }, + "scopes": { + "description": "A collection of filters that must match each object tracked by a quota. 
If not specified, the quota matches all objects.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.ResourceQuotaStatus": { + "description": "ResourceQuotaStatus defines the enforced hard limits and observed use.", + "properties": { + "hard": { + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + }, + "description": "Hard is the set of enforced hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/", + "type": "object" + }, + "used": { + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + }, + "description": "Used is the current observed total usage of the resource in the namespace.", + "type": "object" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.ResourceRequirements": { + "description": "ResourceRequirements describes the compute resource requirements.", + "properties": { + "claims": { + "description": "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable. It can only be set for containers.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ResourceClaim" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map" + }, + "limits": { + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + }, + "description": "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "type": "object" + }, + "requests": { + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + }, + "description": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "type": "object" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.SELinuxOptions": { + "description": "SELinuxOptions are the labels to be applied to the container", + "properties": { + "level": { + "description": "Level is SELinux level label that applies to the container.", + "type": "string" + }, + "role": { + "description": "Role is a SELinux role label that applies to the container.", + "type": "string" + }, + "type": { + "description": "Type is a SELinux type label that applies to the container.", + "type": "string" + }, + "user": { + "description": "User is a SELinux user label that applies to the container.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.ScaleIOPersistentVolumeSource": { + "description": "ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume", + "properties": { + "fsType": { + "description": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". 
Default is \"xfs\"", + "type": "string" + }, + "gateway": { + "description": "gateway is the host address of the ScaleIO API Gateway.", + "type": "string" + }, + "protectionDomain": { + "description": "protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.", + "type": "string" + }, + "readOnly": { + "description": "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "type": "boolean" + }, + "secretRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretReference", + "description": "secretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail." + }, + "sslEnabled": { + "description": "sslEnabled is the flag to enable/disable SSL communication with Gateway, default false", + "type": "boolean" + }, + "storageMode": { + "description": "storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.", + "type": "string" + }, + "storagePool": { + "description": "storagePool is the ScaleIO Storage Pool associated with the protection domain.", + "type": "string" + }, + "system": { + "description": "system is the name of the storage system as configured in ScaleIO.", + "type": "string" + }, + "volumeName": { + "description": "volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source.", + "type": "string" + } + }, + "required": [ + "gateway", + "system", + "secretRef" + ], + "type": "object" + }, + "io.k8s.api.core.v1.ScaleIOVolumeSource": { + "description": "ScaleIOVolumeSource represents a persistent ScaleIO volume", + "properties": { + "fsType": { + "description": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\".", + "type": "string" + }, + "gateway": { + "description": "gateway is the host address of the ScaleIO API Gateway.", + "type": "string" + }, + "protectionDomain": { + "description": "protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.", + "type": "string" + }, + "readOnly": { + "description": "readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "type": "boolean" + }, + "secretRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference", + "description": "secretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail." + }, + "sslEnabled": { + "description": "sslEnabled Flag enable/disable SSL communication with Gateway, default false", + "type": "boolean" + }, + "storageMode": { + "description": "storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. 
Default is ThinProvisioned.", + "type": "string" + }, + "storagePool": { + "description": "storagePool is the ScaleIO Storage Pool associated with the protection domain.", + "type": "string" + }, + "system": { + "description": "system is the name of the storage system as configured in ScaleIO.", + "type": "string" + }, + "volumeName": { + "description": "volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source.", + "type": "string" + } + }, + "required": [ + "gateway", + "system", + "secretRef" + ], + "type": "object" + }, + "io.k8s.api.core.v1.ScopeSelector": { + "description": "A scope selector represents the AND of the selectors represented by the scoped-resource selector requirements.", + "properties": { + "matchExpressions": { + "description": "A list of scope selector requirements by scope of the resources.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ScopedResourceSelectorRequirement" + }, + "type": "array" + } + }, + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.ScopedResourceSelectorRequirement": { + "description": "A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator that relates the scope name and values.", + "properties": { + "operator": { + "description": "Represents a scope's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist.", + "type": "string" + }, + "scopeName": { + "description": "The name of the scope that the selector applies to.", + "type": "string" + }, + "values": { + "description": "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "scopeName", + "operator" + ], + "type": "object" + }, + "io.k8s.api.core.v1.SeccompProfile": { + "description": "SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.", + "properties": { + "localhostProfile": { + "description": "localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \"Localhost\". Must NOT be set for any other type.", + "type": "string" + }, + "type": { + "description": "type indicates which kind of seccomp profile will be applied. Valid options are:\n\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied.", + "type": "string" + } + }, + "required": [ + "type" + ], + "type": "object", + "x-kubernetes-unions": [ + { + "discriminator": "type", + "fields-to-discriminateBy": { + "localhostProfile": "LocalhostProfile" + } + } + ] + }, + "io.k8s.api.core.v1.Secret": { + "description": "Secret holds secret data of a certain type. The total bytes of the values in the Data field must be less than MaxSecretSize bytes.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "data": { + "additionalProperties": { + "format": "byte", + "type": "string" + }, + "description": "Data contains the secret data. Each key must consist of alphanumeric characters, '-', '_' or '.'. The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. Described in https://tools.ietf.org/html/rfc4648#section-4", + "type": "object" + }, + "immutable": { + "description": "Immutable, if set to true, ensures that data stored in the Secret cannot be updated (only object metadata can be modified). If not set to true, the field can be modified at any time. Defaulted to nil.", + "type": "boolean" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "stringData": { + "additionalProperties": { + "type": "string" + }, + "description": "stringData allows specifying non-binary secret data in string form. It is provided as a write-only input field for convenience. All keys and values are merged into the data field on write, overwriting any existing values. The stringData field is never output when reading from the API.", + "type": "object" + }, + "type": { + "description": "Used to facilitate programmatic handling of secret data. More info: https://kubernetes.io/docs/concepts/configuration/secret/#secret-types", + "type": "string" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "Secret", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.SecretEnvSource": { + "description": "SecretEnvSource selects a Secret to populate the environment variables with.\n\nThe contents of the target Secret's Data field will represent the key-value pairs as environment variables.", + "properties": { + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": "string" + }, + "optional": { + "description": "Specify whether the Secret must be defined", + "type": "boolean" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.SecretKeySelector": { + "description": "SecretKeySelector selects a key of a Secret.", + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": "string" + }, + "optional": { + "description": "Specify whether the Secret or its key must be defined", + "type": "boolean" + } + }, + "required": [ + "key" + ], + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.SecretList": { + "description": "SecretList is a list of Secret.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is a list of secret objects. More info: https://kubernetes.io/docs/concepts/configuration/secret", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Secret" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "SecretList", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.SecretProjection": { + "description": "Adapts a secret into a projected volume.\n\nThe contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode.", + "properties": { + "items": { + "description": "items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.KeyToPath" + }, + "type": "array" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": "string" + }, + "optional": { + "description": "optional field specify whether the Secret or its key must be defined", + "type": "boolean" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.SecretReference": { + "description": "SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace", + "properties": { + "name": { + "description": "name is unique within a namespace to reference a secret resource.", + "type": "string" + }, + "namespace": { + "description": "namespace defines the space within which the secret name must be unique.", + "type": "string" + } + }, + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.SecretVolumeSource": { + "description": "Adapts a Secret into a volume.\n\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.", + "properties": { + "defaultMode": { + "description": "defaultMode is Optional: mode bits used to set permissions on created files by default. 
Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "format": "int32", + "type": "integer" + }, + "items": { + "description": "items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.KeyToPath" + }, + "type": "array" + }, + "optional": { + "description": "optional field specify whether the Secret or its keys must be defined", + "type": "boolean" + }, + "secretName": { + "description": "secretName is the name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.SecurityContext": { + "description": "SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.", + "properties": { + "allowPrivilegeEscalation": { + "description": "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.", + "type": "boolean" + }, + "capabilities": { + "$ref": "#/definitions/io.k8s.api.core.v1.Capabilities", + "description": "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows." + }, + "privileged": { + "description": "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. Note that this field cannot be set when spec.os.name is windows.", + "type": "boolean" + }, + "procMount": { + "description": "procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows.", + "type": "string" + }, + "readOnlyRootFilesystem": { + "description": "Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows.", + "type": "boolean" + }, + "runAsGroup": { + "description": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.", + "format": "int64", + "type": "integer" + }, + "runAsNonRoot": { + "description": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "type": "boolean" + }, + "runAsUser": { + "description": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.", + "format": "int64", + "type": "integer" + }, + "seLinuxOptions": { + "$ref": "#/definitions/io.k8s.api.core.v1.SELinuxOptions", + "description": "The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows." + }, + "seccompProfile": { + "$ref": "#/definitions/io.k8s.api.core.v1.SeccompProfile", + "description": "The seccomp options to use by this container. If seccomp options are provided at both the pod \u0026 container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows." + }, + "windowsOptions": { + "$ref": "#/definitions/io.k8s.api.core.v1.WindowsSecurityContextOptions", + "description": "The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux." + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.Service": { + "description": "Service is a named abstraction of software service (for example, mysql) consisting of local port (for example 3306) that the proxy listens on, and the selector that determines which pods will answer requests sent through the proxy.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.core.v1.ServiceSpec", + "description": "Spec defines the behavior of a service. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + }, + "status": { + "$ref": "#/definitions/io.k8s.api.core.v1.ServiceStatus", + "description": "Most recently observed status of the service. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "Service", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.ServiceAccount": { + "description": "ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "automountServiceAccountToken": { + "description": "AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. Can be overridden at the pod level.", + "type": "boolean" + }, + "imagePullSecrets": { + "description": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "secrets": { + "description": "Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use. Pods are only limited to this list if this service account has a \"kubernetes.io/enforce-mountable-secrets\" annotation set to \"true\". This field should not be used to find auto-generated service account token secrets for use outside of pods. Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created. 
More info: https://kubernetes.io/docs/concepts/configuration/secret", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "ServiceAccount", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.ServiceAccountList": { + "description": "ServiceAccountList is a list of ServiceAccount objects", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of ServiceAccounts. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ServiceAccount" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "ServiceAccountList", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.ServiceAccountTokenProjection": { + "description": "ServiceAccountTokenProjection represents a projected service account token volume. This projection can be used to insert a service account token into the pod's runtime filesystem for use against APIs (Kubernetes API Server or otherwise).", + "properties": { + "audience": { + "description": "audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.", + "type": "string" + }, + "expirationSeconds": { + "description": "expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours. Defaults to 1 hour and must be at least 10 minutes.", + "format": "int64", + "type": "integer" + }, + "path": { + "description": "path is the path relative to the mount point of the file to project the token into.", + "type": "string" + } + }, + "required": [ + "path" + ], + "type": "object" + }, + "io.k8s.api.core.v1.ServiceList": { + "description": "ServiceList holds a list of services.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of services", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Service" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "ServiceList", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.ServicePort": { + "description": "ServicePort contains information on service's port.", + "properties": { + "appProtocol": { + "description": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", + "type": "string" + }, + "name": { + "description": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.", + "type": "string" + }, + "nodePort": { + "description": "The port on each node on which this service is exposed when type is NodePort or LoadBalancer. Usually assigned by the system. If a value is specified, in-range, and not in use it will be used, otherwise the operation will fail. If not specified, a port will be allocated if this Service requires one. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type from NodePort to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport", + "format": "int32", + "type": "integer" + }, + "port": { + "description": "The port that will be exposed by this service.", + "format": "int32", + "type": "integer" + }, + "protocol": { + "description": "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". 
Default is TCP.", + "type": "string" + }, + "targetPort": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString", + "description": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service" + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "io.k8s.api.core.v1.ServiceSpec": { + "description": "ServiceSpec describes the attributes that a user creates on a service.", + "properties": { + "allocateLoadBalancerNodePorts": { + "description": "allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is \"true\". It may be set to \"false\" if the cluster load-balancer does not rely on NodePorts. If the caller requests specific NodePorts (by specifying a value), those requests will be respected, regardless of this field. This field may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type.", + "type": "boolean" + }, + "clusterIP": { + "description": "clusterIP is the IP address of the service and is usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be blank) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \"None\", empty string (\"\"), or a valid IP address. Setting this to \"None\" makes a \"headless service\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "type": "string" + }, + "clusterIPs": { + "description": "ClusterIPs is a list of IP addresses assigned to this service, and are usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be empty) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as describe above). Valid values are \"None\", empty string (\"\"), or a valid IP address. Setting this to \"None\" makes a \"headless service\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. 
If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. If this field is not specified, it will be initialized from the clusterIP field. If this field is specified, clients must ensure that clusterIPs[0] and clusterIP have the same value.\n\nThis field may hold a maximum of two entries (dual-stack IPs, in either order). These IPs must correspond to the values of the ipFamilies field. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "externalIPs": { + "description": "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system.", + "items": { + "type": "string" + }, + "type": "array" + }, + "externalName": { + "description": "externalName is the external reference that discovery mechanisms will return as an alias for this service (e.g. a DNS CNAME record). No proxying will be involved. Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires `type` to be \"ExternalName\".", + "type": "string" + }, + "externalTrafficPolicy": { + "description": "externalTrafficPolicy describes how nodes distribute service traffic they receive on one of the Service's \"externally-facing\" addresses (NodePorts, ExternalIPs, and LoadBalancer IPs). If set to \"Local\", the proxy will configure the service in a way that assumes that external load balancers will take care of balancing the service traffic between nodes, and so each node will deliver traffic only to the node-local endpoints of the service, without masquerading the client source IP. (Traffic mistakenly sent to a node with no endpoints will be dropped.) The default value, \"Cluster\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features). Note that traffic sent to an External IP or LoadBalancer IP from within the cluster will always get \"Cluster\" semantics, but clients sending to a NodePort from within the cluster may need to take traffic policy into account when picking a node.", + "type": "string" + }, + "healthCheckNodePort": { + "description": "healthCheckNodePort specifies the healthcheck nodePort for the service. This only applies when type is set to LoadBalancer and externalTrafficPolicy is set to Local. If a value is specified, is in-range, and is not in use, it will be used. If not specified, a value will be automatically allocated. External systems (e.g. load-balancers) can use this port to determine if a given node holds endpoints for this service or not. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type). This field cannot be updated once set.", + "format": "int32", + "type": "integer" + }, + "internalTrafficPolicy": { + "description": "InternalTrafficPolicy describes how nodes distribute service traffic they receive on the ClusterIP. 
If set to \"Local\", the proxy will assume that pods only want to talk to endpoints of the service on the same node as the pod, dropping the traffic if there are no local endpoints. The default value, \"Cluster\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features).", + "type": "string" + }, + "ipFamilies": { + "description": "IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this service. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. If this field is specified manually, the requested family is available in the cluster, and ipFamilyPolicy allows it, it will be used; otherwise creation of the service will fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not allow changing the primary IP family of the Service. Valid values are \"IPv4\" and \"IPv6\". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, and does apply to \"headless\" services. This field will be wiped when updating a Service to type ExternalName.\n\nThis field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond to the values of the clusterIPs field, if specified. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "ipFamilyPolicy": { + "description": "IPFamilyPolicy represents the dual-stack-ness requested or required by this Service. If there is no value provided, then this field will be set to SingleStack. Services can be \"SingleStack\" (a single IP family), \"PreferDualStack\" (two IP families on dual-stack configured clusters or a single IP family on single-stack clusters), or \"RequireDualStack\" (two IP families on dual-stack configured clusters, otherwise fail). The ipFamilies and clusterIPs fields depend on the value of this field. This field will be wiped when updating a service to type ExternalName.", + "type": "string" + }, + "loadBalancerClass": { + "description": "loadBalancerClass is the class of the load balancer implementation this Service belongs to. If specified, the value of this field must be a label-style identifier, with an optional prefix, e.g. \"internal-vip\" or \"example.com/internal-vip\". Unprefixed names are reserved for end-users. This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load balancer implementation is used, today this is typically done through the cloud provider integration, but should apply for any default implementation. If set, it is assumed that a load balancer implementation is watching for Services with a matching class. Any default load balancer implementation (e.g. cloud providers) should ignore Services that set this field. This field can only be set when creating or updating a Service to type 'LoadBalancer'. Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.", + "type": "string" + }, + "loadBalancerIP": { + "description": "Only applies to Service Type: LoadBalancer. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature. Deprecated: This field was under-specified and its meaning varies across implementations. 
Using it is non-portable and it may not support dual-stack. Users are encouraged to use implementation-specific annotations when available.", + "type": "string" + }, + "loadBalancerSourceRanges": { + "description": "If specified and supported by the platform, traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature. More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/", + "items": { + "type": "string" + }, + "type": "array" + }, + "ports": { + "description": "The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ServicePort" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "port", + "protocol" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "port", + "x-kubernetes-patch-strategy": "merge" + }, + "publishNotReadyAddresses": { + "description": "publishNotReadyAddresses indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready. The primary use case for setting this field is for a StatefulSet's Headless Service to propagate SRV DNS records for its Pods for the purpose of peer discovery. The Kubernetes controllers that generate Endpoints and EndpointSlice resources for Services interpret this to mean that all endpoints are considered \"ready\" even if the Pods themselves are not. Agents which consume only Kubernetes generated endpoints through the Endpoints or EndpointSlice resources can safely assume this behavior.", + "type": "boolean" + }, + "selector": { + "additionalProperties": { + "type": "string" + }, + "description": "Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/", + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "sessionAffinity": { + "description": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "type": "string" + }, + "sessionAffinityConfig": { + "$ref": "#/definitions/io.k8s.api.core.v1.SessionAffinityConfig", + "description": "sessionAffinityConfig contains the configurations of session affinity." + }, + "type": { + "description": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object or EndpointSlice objects. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a virtual IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the same endpoints as the clusterIP.
\"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the same endpoints as the clusterIP. \"ExternalName\" aliases this service to the specified externalName. Several other fields do not apply to ExternalName services. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.ServiceStatus": { + "description": "ServiceStatus represents the current status of a service.", + "properties": { + "conditions": { + "description": "Current service state", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "loadBalancer": { + "$ref": "#/definitions/io.k8s.api.core.v1.LoadBalancerStatus", + "description": "LoadBalancer contains the current status of the load-balancer, if one is present." + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.SessionAffinityConfig": { + "description": "SessionAffinityConfig represents the configurations of session affinity.", + "properties": { + "clientIP": { + "$ref": "#/definitions/io.k8s.api.core.v1.ClientIPConfig", + "description": "clientIP contains the configurations of Client IP based session affinity." + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.SleepAction": { + "description": "SleepAction describes a \"sleep\" action.", + "properties": { + "seconds": { + "description": "Seconds is the number of seconds to sleep.", + "format": "int64", + "type": "integer" + } + }, + "required": [ + "seconds" + ], + "type": "object" + }, + "io.k8s.api.core.v1.StorageOSPersistentVolumeSource": { + "description": "Represents a StorageOS persistent volume resource.", + "properties": { + "fsType": { + "description": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "type": "string" + }, + "readOnly": { + "description": "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "type": "boolean" + }, + "secretRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference", + "description": "secretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted." + }, + "volumeName": { + "description": "volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.", + "type": "string" + }, + "volumeNamespace": { + "description": "volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.StorageOSVolumeSource": { + "description": "Represents a StorageOS persistent volume resource.", + "properties": { + "fsType": { + "description": "fsType is the filesystem type to mount. 
Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "type": "string" + }, + "readOnly": { + "description": "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "type": "boolean" + }, + "secretRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference", + "description": "secretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted." + }, + "volumeName": { + "description": "volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.", + "type": "string" + }, + "volumeNamespace": { + "description": "volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.Sysctl": { + "description": "Sysctl defines a kernel parameter to be set", + "properties": { + "name": { + "description": "Name of a property to set", + "type": "string" + }, + "value": { + "description": "Value of a property to set", + "type": "string" + } + }, + "required": [ + "name", + "value" + ], + "type": "object" + }, + "io.k8s.api.core.v1.TCPSocketAction": { + "description": "TCPSocketAction describes an action based on opening a socket", + "properties": { + "host": { + "description": "Optional: Host name to connect to, defaults to the pod IP.", + "type": "string" + }, + "port": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString", + "description": "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME." + } + }, + "required": [ + "port" + ], + "type": "object" + }, + "io.k8s.api.core.v1.Taint": { + "description": "The node this Taint is attached to has the \"effect\" on any pod that does not tolerate the Taint.", + "properties": { + "effect": { + "description": "Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute.", + "type": "string" + }, + "key": { + "description": "Required. The taint key to be applied to a node.", + "type": "string" + }, + "timeAdded": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints." + }, + "value": { + "description": "The taint value corresponding to the taint key.", + "type": "string" + } + }, + "required": [ + "key", + "effect" + ], + "type": "object" + }, + "io.k8s.api.core.v1.Toleration": { + "description": "The pod this Toleration is attached to tolerates any taint that matches the triple \u003ckey,value,effect\u003e using the matching operator \u003coperator\u003e.", + "properties": { + "effect": { + "description": "Effect indicates the taint effect to match. Empty means match all taint effects. 
When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.", + "type": "string" + }, + "key": { + "description": "Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.", + "type": "string" + }, + "operator": { + "description": "Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.", + "type": "string" + }, + "tolerationSeconds": { + "description": "TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.", + "format": "int64", + "type": "integer" + }, + "value": { + "description": "Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.TopologySelectorLabelRequirement": { + "description": "A topology selector requirement is a selector that matches given label. This is an alpha feature and may change in the future.", + "properties": { + "key": { + "description": "The label key that the selector applies to.", + "type": "string" + }, + "values": { + "description": "An array of string values. One value must match the label to be selected. Each entry in Values is ORed.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "values" + ], + "type": "object" + }, + "io.k8s.api.core.v1.TopologySelectorTerm": { + "description": "A topology selector term represents the result of label queries. A null or empty topology selector term matches no objects. The requirements of them are ANDed. It provides a subset of functionality as NodeSelectorTerm. This is an alpha feature and may change in the future.", + "properties": { + "matchLabelExpressions": { + "description": "A list of topology selector requirements by labels.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.TopologySelectorLabelRequirement" + }, + "type": "array" + } + }, + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.TopologySpreadConstraint": { + "description": "TopologySpreadConstraint specifies how to spread matching pods among the given topology.", + "properties": { + "labelSelector": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", + "description": "LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain." + }, + "matchLabelKeys": { + "description": "MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot be set when LabelSelector isn't set. Keys that don't exist in the incoming pod labels will be ignored. 
A null or empty list means only match against labelSelector.\n\nThis is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "maxSkew": { + "description": "MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It's a required field. Default value is 1 and 0 is not allowed.", + "format": "int32", + "type": "integer" + }, + "minDomains": { + "description": "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys is equal to or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule.\n\nFor example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew.\n\nThis is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default).", + "format": "int32", + "type": "integer" + }, + "nodeAffinityPolicy": { + "description": "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.\n\nIf this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.", + "type": "string" + }, + "nodeTaintsPolicy": { + "description": "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew.
Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included.\n\nIf this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.", + "type": "string" + }, + "topologyKey": { + "description": "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each \u003ckey, value\u003e as a \"bucket\", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is \"kubernetes.io/hostname\", each Node is a domain of that topology. And, if TopologyKey is \"topology.kubernetes.io/zone\", each zone is a domain of that topology. It's a required field.", + "type": "string" + }, + "whenUnsatisfiable": { + "description": "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location,\n but giving higher precedence to topologies that would help reduce the\n skew.\nA constraint is considered \"Unsatisfiable\" for an incoming pod if and only if every possible node assignment for that pod would violate \"MaxSkew\" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won't make it *more* imbalanced. It's a required field.", + "type": "string" + } + }, + "required": [ + "maxSkew", + "topologyKey", + "whenUnsatisfiable" + ], + "type": "object" + }, + "io.k8s.api.core.v1.TypedLocalObjectReference": { + "description": "TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace.", + "properties": { + "apiGroup": { + "description": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", + "type": "string" + }, + "kind": { + "description": "Kind is the type of resource being referenced", + "type": "string" + }, + "name": { + "description": "Name is the name of resource being referenced", + "type": "string" + } + }, + "required": [ + "kind", + "name" + ], + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.TypedObjectReference": { + "properties": { + "apiGroup": { + "description": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required.", + "type": "string" + }, + "kind": { + "description": "Kind is the type of resource being referenced", + "type": "string" + }, + "name": { + "description": "Name is the name of resource being referenced", + "type": "string" + }, + "namespace": { + "description": "Namespace is the namespace of resource being referenced. Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.", + "type": "string" + } + }, + "required": [ + "kind", + "name" + ], + "type": "object" + }, + "io.k8s.api.core.v1.Volume": { + "description": "Volume represents a named volume in a pod that may be accessed by any container in the pod.", + "properties": { + "awsElasticBlockStore": { + "$ref": "#/definitions/io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource", + "description": "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore" + }, + "azureDisk": { + "$ref": "#/definitions/io.k8s.api.core.v1.AzureDiskVolumeSource", + "description": "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod." + }, + "azureFile": { + "$ref": "#/definitions/io.k8s.api.core.v1.AzureFileVolumeSource", + "description": "azureFile represents an Azure File Service mount on the host and bind mount to the pod." + }, + "cephfs": { + "$ref": "#/definitions/io.k8s.api.core.v1.CephFSVolumeSource", + "description": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime" + }, + "cinder": { + "$ref": "#/definitions/io.k8s.api.core.v1.CinderVolumeSource", + "description": "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md" + }, + "configMap": { + "$ref": "#/definitions/io.k8s.api.core.v1.ConfigMapVolumeSource", + "description": "configMap represents a configMap that should populate this volume" + }, + "csi": { + "$ref": "#/definitions/io.k8s.api.core.v1.CSIVolumeSource", + "description": "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature)." + }, + "downwardAPI": { + "$ref": "#/definitions/io.k8s.api.core.v1.DownwardAPIVolumeSource", + "description": "downwardAPI represents downward API about the pod that should populate this volume" + }, + "emptyDir": { + "$ref": "#/definitions/io.k8s.api.core.v1.EmptyDirVolumeSource", + "description": "emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir" + }, + "ephemeral": { + "$ref": "#/definitions/io.k8s.api.core.v1.EphemeralVolumeSource", + "description": "ephemeral represents a volume that is handled by a cluster storage driver.
The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time." + }, + "fc": { + "$ref": "#/definitions/io.k8s.api.core.v1.FCVolumeSource", + "description": "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod." + }, + "flexVolume": { + "$ref": "#/definitions/io.k8s.api.core.v1.FlexVolumeSource", + "description": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin." + }, + "flocker": { + "$ref": "#/definitions/io.k8s.api.core.v1.FlockerVolumeSource", + "description": "flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running" + }, + "gcePersistentDisk": { + "$ref": "#/definitions/io.k8s.api.core.v1.GCEPersistentDiskVolumeSource", + "description": "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk" + }, + "gitRepo": { + "$ref": "#/definitions/io.k8s.api.core.v1.GitRepoVolumeSource", + "description": "gitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container." + }, + "glusterfs": { + "$ref": "#/definitions/io.k8s.api.core.v1.GlusterfsVolumeSource", + "description": "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md" + }, + "hostPath": { + "$ref": "#/definitions/io.k8s.api.core.v1.HostPathVolumeSource", + "description": "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath" + }, + "iscsi": { + "$ref": "#/definitions/io.k8s.api.core.v1.ISCSIVolumeSource", + "description": "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md" + }, + "name": { + "description": "name of the volume. Must be a DNS_LABEL and unique within the pod. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": "string" + }, + "nfs": { + "$ref": "#/definitions/io.k8s.api.core.v1.NFSVolumeSource", + "description": "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs" + }, + "persistentVolumeClaim": { + "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimVolumeSource", + "description": "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims" + }, + "photonPersistentDisk": { + "$ref": "#/definitions/io.k8s.api.core.v1.PhotonPersistentDiskVolumeSource", + "description": "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine" + }, + "portworxVolume": { + "$ref": "#/definitions/io.k8s.api.core.v1.PortworxVolumeSource", + "description": "portworxVolume represents a portworx volume attached and mounted on kubelets host machine" + }, + "projected": { + "$ref": "#/definitions/io.k8s.api.core.v1.ProjectedVolumeSource", + "description": "projected items for all in one resources secrets, configmaps, and downward API" + }, + "quobyte": { + "$ref": "#/definitions/io.k8s.api.core.v1.QuobyteVolumeSource", + "description": "quobyte represents a Quobyte mount on the host that shares a pod's lifetime" + }, + "rbd": { + "$ref": "#/definitions/io.k8s.api.core.v1.RBDVolumeSource", + "description": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md" + }, + "scaleIO": { + "$ref": "#/definitions/io.k8s.api.core.v1.ScaleIOVolumeSource", + "description": "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes." + }, + "secret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretVolumeSource", + "description": "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret" + }, + "storageos": { + "$ref": "#/definitions/io.k8s.api.core.v1.StorageOSVolumeSource", + "description": "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes." + }, + "vsphereVolume": { + "$ref": "#/definitions/io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource", + "description": "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "io.k8s.api.core.v1.VolumeDevice": { + "description": "volumeDevice describes a mapping of a raw block device within a container.", + "properties": { + "devicePath": { + "description": "devicePath is the path inside of the container that the device will be mapped to.", + "type": "string" + }, + "name": { + "description": "name must match the name of a persistentVolumeClaim in the pod", + "type": "string" + } + }, + "required": [ + "name", + "devicePath" + ], + "type": "object" + }, + "io.k8s.api.core.v1.VolumeMount": { + "description": "VolumeMount describes a mounting of a Volume within a container.", + "properties": { + "mountPath": { + "description": "Path within the container at which the volume should be mounted. Must not contain ':'.", + "type": "string" + }, + "mountPropagation": { + "description": "mountPropagation determines how mounts are propagated from the host to container and the other way around. 
When not set, MountPropagationNone is used. This field is beta in 1.10.", + "type": "string" + }, + "name": { + "description": "This must match the Name of a Volume.", + "type": "string" + }, + "readOnly": { + "description": "Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.", + "type": "boolean" + }, + "subPath": { + "description": "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).", + "type": "string" + }, + "subPathExpr": { + "description": "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive.", + "type": "string" + } + }, + "required": [ + "name", + "mountPath" + ], + "type": "object" + }, + "io.k8s.api.core.v1.VolumeNodeAffinity": { + "description": "VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from.", + "properties": { + "required": { + "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelector", + "description": "required specifies hard node constraints that must be met." + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.VolumeProjection": { + "description": "Projection that may be projected along with other supported volume types", + "properties": { + "clusterTrustBundle": { + "$ref": "#/definitions/io.k8s.api.core.v1.ClusterTrustBundleProjection", + "description": "ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field of ClusterTrustBundle objects in an auto-updating file.\n\nAlpha, gated by the ClusterTrustBundleProjection feature gate.\n\nClusterTrustBundle objects can either be selected by name, or by the combination of signer name and a label selector.\n\nKubelet performs aggressive normalization of the PEM contents written into the pod filesystem. Esoteric PEM features such as inter-block comments and block headers are stripped. Certificates are deduplicated. The ordering of certificates within the file is arbitrary, and Kubelet may change the order over time." + }, + "configMap": { + "$ref": "#/definitions/io.k8s.api.core.v1.ConfigMapProjection", + "description": "configMap information about the configMap data to project" + }, + "downwardAPI": { + "$ref": "#/definitions/io.k8s.api.core.v1.DownwardAPIProjection", + "description": "downwardAPI information about the downwardAPI data to project" + }, + "secret": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretProjection", + "description": "secret information about the secret data to project" + }, + "serviceAccountToken": { + "$ref": "#/definitions/io.k8s.api.core.v1.ServiceAccountTokenProjection", + "description": "serviceAccountToken is information about the serviceAccountToken data to project" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.VolumeResourceRequirements": { + "description": "VolumeResourceRequirements describes the storage resource requirements for a volume.", + "properties": { + "limits": { + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + }, + "description": "Limits describes the maximum amount of compute resources allowed. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "type": "object" + }, + "requests": { + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + }, + "description": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "type": "object" + } + }, + "type": "object" + }, + "io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource": { + "description": "Represents a vSphere volume resource.", + "properties": { + "fsType": { + "description": "fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "type": "string" + }, + "storagePolicyID": { + "description": "storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.", + "type": "string" + }, + "storagePolicyName": { + "description": "storagePolicyName is the storage Policy Based Management (SPBM) profile name.", + "type": "string" + }, + "volumePath": { + "description": "volumePath is the path that identifies vSphere volume vmdk", + "type": "string" + } + }, + "required": [ + "volumePath" + ], + "type": "object" + }, + "io.k8s.api.core.v1.WeightedPodAffinityTerm": { + "description": "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)", + "properties": { + "podAffinityTerm": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodAffinityTerm", + "description": "Required. A pod affinity term, associated with the corresponding weight." + }, + "weight": { + "description": "weight associated with matching the corresponding podAffinityTerm, in the range 1-100.", + "format": "int32", + "type": "integer" + } + }, + "required": [ + "weight", + "podAffinityTerm" + ], + "type": "object" + }, + "io.k8s.api.core.v1.WindowsSecurityContextOptions": { + "description": "WindowsSecurityContextOptions contain Windows-specific options and credentials.", + "properties": { + "gmsaCredentialSpec": { + "description": "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.", + "type": "string" + }, + "gmsaCredentialSpecName": { + "description": "GMSACredentialSpecName is the name of the GMSA credential spec to use.", + "type": "string" + }, + "hostProcess": { + "description": "HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.", + "type": "boolean" + }, + "runAsUserName": { + "description": "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.discovery.v1.Endpoint": { + "description": "Endpoint represents a single logical \"backend\" implementing a service.", + "properties": { + "addresses": { + "description": "addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100. These are all assumed to be fungible and clients may choose to only use the first element. Refer to: https://issue.k8s.io/106267", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "set" + }, + "conditions": { + "$ref": "#/definitions/io.k8s.api.discovery.v1.EndpointConditions", + "description": "conditions contains information about the current status of the endpoint." + }, + "deprecatedTopology": { + "additionalProperties": { + "type": "string" + }, + "description": "deprecatedTopology contains topology information part of the v1beta1 API. This field is deprecated, and will be removed when the v1beta1 API is removed (no sooner than kubernetes v1.24). While this field can hold values, it is not writable through the v1 API, and any attempts to write to it will be silently ignored. Topology information can be found in the zone and nodeName fields instead.", + "type": "object" + }, + "hints": { + "$ref": "#/definitions/io.k8s.api.discovery.v1.EndpointHints", + "description": "hints contains information associated with how an endpoint should be consumed." + }, + "hostname": { + "description": "hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must be lowercase and pass DNS Label (RFC 1123) validation.", + "type": "string" + }, + "nodeName": { + "description": "nodeName represents the name of the Node hosting this endpoint. This can be used to determine endpoints local to a Node.", + "type": "string" + }, + "targetRef": { + "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference", + "description": "targetRef is a reference to a Kubernetes object that represents this endpoint." + }, + "zone": { + "description": "zone is the name of the Zone this endpoint exists in.", + "type": "string" + } + }, + "required": [ + "addresses" + ], + "type": "object" + }, + "io.k8s.api.discovery.v1.EndpointConditions": { + "description": "EndpointConditions represents the current condition of an endpoint.", + "properties": { + "ready": { + "description": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be \"true\" for terminating endpoints, except when the normal readiness behavior is being explicitly overridden, for example when the associated Service has set the publishNotReadyAddresses flag.", + "type": "boolean" + }, + "serving": { + "description": "serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. 
If nil, consumers should defer to the ready condition.", + "type": "boolean" + }, + "terminating": { + "description": "terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating.", + "type": "boolean" + } + }, + "type": "object" + }, + "io.k8s.api.discovery.v1.EndpointHints": { + "description": "EndpointHints provides hints describing how an endpoint should be consumed.", + "properties": { + "forZones": { + "description": "forZones indicates the zone(s) this endpoint should be consumed by to enable topology aware routing.", + "items": { + "$ref": "#/definitions/io.k8s.api.discovery.v1.ForZone" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "type": "object" + }, + "io.k8s.api.discovery.v1.EndpointPort": { + "description": "EndpointPort represents a Port used by an EndpointSlice", + "properties": { + "appProtocol": { + "description": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", + "type": "string" + }, + "name": { + "description": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", + "type": "string" + }, + "port": { + "description": "port represents the port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", + "format": "int32", + "type": "integer" + }, + "protocol": { + "description": "protocol represents the IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", + "type": "string" + } + }, + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.discovery.v1.EndpointSlice": { + "description": "EndpointSlice represents a subset of the endpoints that implement a service. For a given service there may be multiple EndpointSlice objects, selected by labels, which must be joined to produce the full set of endpoints.", + "properties": { + "addressType": { + "description": "addressType specifies the type of address carried by this EndpointSlice. All addresses in this slice must be the same type. This field is immutable after creation. The following address types are currently supported: * IPv4: Represents an IPv4 Address. * IPv6: Represents an IPv6 Address. 
* FQDN: Represents a Fully Qualified Domain Name.", + "type": "string" + }, + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "endpoints": { + "description": "endpoints is a list of unique endpoints in this slice. Each slice may include a maximum of 1000 endpoints.", + "items": { + "$ref": "#/definitions/io.k8s.api.discovery.v1.Endpoint" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata." + }, + "ports": { + "description": "ports specifies the list of network ports exposed by each endpoint in this slice. Each port must have a unique name. When ports is empty, it indicates that there are no defined ports. When a port is defined with a nil port value, it indicates \"all ports\". Each slice may include a maximum of 100 ports.", + "items": { + "$ref": "#/definitions/io.k8s.api.discovery.v1.EndpointPort" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "required": [ + "addressType", + "endpoints" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "discovery.k8s.io", + "kind": "EndpointSlice", + "version": "v1" + } + ] + }, + "io.k8s.api.discovery.v1.EndpointSliceList": { + "description": "EndpointSliceList represents a list of endpoint slices", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of endpoint slices", + "items": { + "$ref": "#/definitions/io.k8s.api.discovery.v1.EndpointSlice" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata." 
+ } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "discovery.k8s.io", + "kind": "EndpointSliceList", + "version": "v1" + } + ] + }, + "io.k8s.api.discovery.v1.ForZone": { + "description": "ForZone provides information about which zones should consume this endpoint.", + "properties": { + "name": { + "description": "name represents the name of the zone.", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "io.k8s.api.events.v1.Event": { + "description": "Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. Events have a limited retention time and triggers and messages may evolve with time. Event consumers should not rely on the timing of an event with a given Reason reflecting a consistent underlying trigger, or the continued existence of events with that Reason. Events should be treated as informative, best-effort, supplemental data.", + "properties": { + "action": { + "description": "action is what action was taken/failed regarding the regarding object. It is machine-readable. This field cannot be empty for new Events and it can have at most 128 characters.", + "type": "string" + }, + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "deprecatedCount": { + "description": "deprecatedCount is the deprecated field assuring backward compatibility with core.v1 Event type.", + "format": "int32", + "type": "integer" + }, + "deprecatedFirstTimestamp": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "deprecatedFirstTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type." + }, + "deprecatedLastTimestamp": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "deprecatedLastTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type." + }, + "deprecatedSource": { + "$ref": "#/definitions/io.k8s.api.core.v1.EventSource", + "description": "deprecatedSource is the deprecated field assuring backward compatibility with core.v1 Event type." + }, + "eventTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime", + "description": "eventTime is the time when this Event was first observed. It is required." + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "note": { + "description": "note is a human-readable description of the status of this operation. Maximal length of the note is 1kB, but libraries should be prepared to handle values up to 64kB.", + "type": "string" + }, + "reason": { + "description": "reason is why the action was taken. It is human-readable.
This field cannot be empty for new Events and it can have at most 128 characters.", + "type": "string" + }, + "regarding": { + "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference", + "description": "regarding contains the object this Event is about. In most cases it's the object that the reporting controller implements, e.g. ReplicaSetController implements ReplicaSets and this event is emitted because it acts on some changes in a ReplicaSet object." + }, + "related": { + "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference", + "description": "related is the optional secondary object for more complex actions. E.g. when the regarding object triggers a creation or deletion of a related object." + }, + "reportingController": { + "description": "reportingController is the name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. This field cannot be empty for new Events.", + "type": "string" + }, + "reportingInstance": { + "description": "reportingInstance is the ID of the controller instance, e.g. `kubelet-xyzf`. This field cannot be empty for new Events and it can have at most 128 characters.", + "type": "string" + }, + "series": { + "$ref": "#/definitions/io.k8s.api.events.v1.EventSeries", + "description": "series is data about the Event series this event represents or nil if it's a singleton Event." + }, + "type": { + "description": "type is the type of this event (Normal, Warning), new types could be added in the future. It is machine-readable. This field cannot be empty for new Events.", + "type": "string" + } + }, + "required": [ + "eventTime" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "events.k8s.io", + "kind": "Event", + "version": "v1" + } + ] + }, + "io.k8s.api.events.v1.EventList": { + "description": "EventList is a list of Event objects.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is a list of schema objects.", + "items": { + "$ref": "#/definitions/io.k8s.api.events.v1.Event" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "events.k8s.io", + "kind": "EventList", + "version": "v1" + } + ] + }, + "io.k8s.api.events.v1.EventSeries": { + "description": "EventSeries contains information on a series of events, i.e. a thing that was/is happening continuously for some time. How often to update the EventSeries is up to the event reporters.
The default event reporter in \"k8s.io/client-go/tools/events/event_broadcaster.go\" shows how this struct is updated on heartbeats and can guide customized reporter implementations.", + "properties": { + "count": { + "description": "count is the number of occurrences in this series up to the last heartbeat time.", + "format": "int32", + "type": "integer" + }, + "lastObservedTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime", + "description": "lastObservedTime is the time when the last Event from the series was seen before the last heartbeat." + } + }, + "required": [ + "count", + "lastObservedTime" + ], + "type": "object" + }, + "io.k8s.api.flowcontrol.v1.ExemptPriorityLevelConfiguration": { + "description": "ExemptPriorityLevelConfiguration describes the configurable aspects of the handling of exempt requests. In the mandatory exempt configuration object the values in the fields here can be modified by authorized users, unlike the rest of the `spec`.", + "properties": { + "lendablePercent": { + "description": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )", + "format": "int32", + "type": "integer" + }, + "nominalConcurrencyShares": { + "description": "`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats nominally reserved for this priority level. This DOES NOT limit the dispatching from this priority level but affects the other priority levels through the borrowing mechanism. The server's concurrency limit (ServerCL) is divided among all the priority levels in proportion to their NCS values:\n\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\n\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of zero.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "io.k8s.api.flowcontrol.v1.FlowDistinguisherMethod": { + "description": "FlowDistinguisherMethod specifies the method of a flow distinguisher.", + "properties": { + "type": { + "description": "`type` is the type of flow distinguisher method. The supported types are \"ByUser\" and \"ByNamespace\". Required.", + "type": "string" + } + }, + "required": [ + "type" + ], + "type": "object" + }, + "io.k8s.api.flowcontrol.v1.FlowSchema": { + "description": "FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with similar attributes and is identified by a pair of strings: the name of the FlowSchema and a \"flow distinguisher\".", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.FlowSchemaSpec", + "description": "`spec` is the specification of the desired behavior of a FlowSchema. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + }, + "status": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.FlowSchemaStatus", + "description": "`status` is the current status of a FlowSchema. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "FlowSchema", + "version": "v1" + } + ] + }, + "io.k8s.api.flowcontrol.v1.FlowSchemaCondition": { + "description": "FlowSchemaCondition describes conditions for a FlowSchema.", + "properties": { + "lastTransitionTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "`lastTransitionTime` is the last time the condition transitioned from one status to another." + }, + "message": { + "description": "`message` is a human-readable message indicating details about last transition.", + "type": "string" + }, + "reason": { + "description": "`reason` is a unique, one-word, CamelCase reason for the condition's last transition.", + "type": "string" + }, + "status": { + "description": "`status` is the status of the condition. Can be True, False, Unknown. Required.", + "type": "string" + }, + "type": { + "description": "`type` is the type of the condition. Required.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.flowcontrol.v1.FlowSchemaList": { + "description": "FlowSchemaList is a list of FlowSchema objects.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "`items` is a list of FlowSchemas.", + "items": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.FlowSchema" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "`metadata` is the standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "FlowSchemaList", + "version": "v1" + } + ] + }, + "io.k8s.api.flowcontrol.v1.FlowSchemaSpec": { + "description": "FlowSchemaSpec describes what the FlowSchema's specification looks like.", + "properties": { + "distinguisherMethod": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.FlowDistinguisherMethod", + "description": "`distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema. `nil` specifies that the distinguisher is disabled and thus will always be the empty string." + }, + "matchingPrecedence": { + "description": "`matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen FlowSchema is among those with the numerically lowest (which we take to be logically highest) MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000]. Note that if the precedence is not specified, it will be set to 1000 as default.", + "format": "int32", + "type": "integer" + }, + "priorityLevelConfiguration": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationReference", + "description": "`priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot be resolved, the FlowSchema will be ignored and marked as invalid in its status. Required." + }, + "rules": { + "description": "`rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if at least one member of rules matches the request. If it is an empty slice, there will be no requests matching the FlowSchema.", + "items": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.PolicyRulesWithSubjects" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "required": [ + "priorityLevelConfiguration" + ], + "type": "object" + }, + "io.k8s.api.flowcontrol.v1.FlowSchemaStatus": { + "description": "FlowSchemaStatus represents the current state of a FlowSchema.", + "properties": { + "conditions": { + "description": "`conditions` is a list of the current states of FlowSchema.", + "items": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.FlowSchemaCondition" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + } + }, + "type": "object" + }, + "io.k8s.api.flowcontrol.v1.GroupSubject": { + "description": "GroupSubject holds detailed information for group-kind subject.", + "properties": { + "name": { + "description": "name is the user group that matches, or \"*\" to match all user groups. See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some well-known group names. Required.", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "io.k8s.api.flowcontrol.v1.LimitResponse": { + "description": "LimitResponse defines how to handle requests that can not be executed right now.", + "properties": { + "queuing": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.QueuingConfiguration", + "description": "`queuing` holds the configuration parameters for queuing. This field may be non-empty only if `type` is `\"Queue\"`."
+ }, + "type": { + "description": "`type` is \"Queue\" or \"Reject\". \"Queue\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \"Reject\" means that requests that can not be executed upon arrival are rejected. Required.", + "type": "string" + } + }, + "required": [ + "type" + ], + "type": "object", + "x-kubernetes-unions": [ + { + "discriminator": "type", + "fields-to-discriminateBy": { + "queuing": "Queuing" + } + } + ] + }, + "io.k8s.api.flowcontrol.v1.LimitedPriorityLevelConfiguration": { + "description": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?", + "properties": { + "borrowingLimitPercent": { + "description": "`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\n\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\n\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.", + "format": "int32", + "type": "integer" + }, + "lendablePercent": { + "description": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )", + "format": "int32", + "type": "integer" + }, + "limitResponse": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.LimitResponse", + "description": "`limitResponse` indicates what to do with requests that can not be executed right now" + }, + "nominalConcurrencyShares": { + "description": "`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats available at this priority level. This is used both for requests dispatched from this priority level as well as requests dispatched from other priority levels borrowing seats from this level. 
The server's concurrency limit (ServerCL) is divided among the Limited priority levels in proportion to their NCS values:\n\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\n\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level.\n\nIf not specified, this field defaults to a value of 30.\n\nSetting this field to zero supports the construction of a \"jail\" for this priority level that is used to hold some request(s)", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "io.k8s.api.flowcontrol.v1.NonResourcePolicyRule": { + "description": "NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member of verbs matches the request and (b) at least one member of nonResourceURLs matches the request.", + "properties": { + "nonResourceURLs": { + "description": "`nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. For example:\n - \"/healthz\" is legal\n - \"/hea*\" is illegal\n - \"/hea\" is legal but matches nothing\n - \"/hea/*\" also matches nothing\n - \"/healthz/*\" matches all per-component health checks.\n\"*\" matches all non-resource urls. If it is present, it must be the only entry. Required.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "set" + }, + "verbs": { + "description": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs. If it is present, it must be the only entry. Required.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "set" + } + }, + "required": [ + "verbs", + "nonResourceURLs" + ], + "type": "object" + }, + "io.k8s.api.flowcontrol.v1.PolicyRulesWithSubjects": { + "description": "PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member of resourceRules or nonResourceRules matches the request.", + "properties": { + "nonResourceRules": { + "description": "`nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb and the target non-resource URL.", + "items": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.NonResourcePolicyRule" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "resourceRules": { + "description": "`resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the target resource. At least one of `resourceRules` and `nonResourceRules` has to be non-empty.", + "items": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.ResourcePolicyRule" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "subjects": { + "description": "subjects is the list of normal users, serviceaccounts, or groups that this rule cares about. There must be at least one member in this slice. A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request.
Required.", + "items": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.Subject" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "required": [ + "subjects" + ], + "type": "object" + }, + "io.k8s.api.flowcontrol.v1.PriorityLevelConfiguration": { + "description": "PriorityLevelConfiguration represents the configuration of a priority level.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationSpec", + "description": "`spec` is the specification of the desired behavior of a \"request-priority\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + }, + "status": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationStatus", + "description": "`status` is the current status of a \"request-priority\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "PriorityLevelConfiguration", + "version": "v1" + } + ] + }, + "io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationCondition": { + "description": "PriorityLevelConfigurationCondition defines the condition of priority level.", + "properties": { + "lastTransitionTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "`lastTransitionTime` is the last time the condition transitioned from one status to another." + }, + "message": { + "description": "`message` is a human-readable message indicating details about last transition.", + "type": "string" + }, + "reason": { + "description": "`reason` is a unique, one-word, CamelCase reason for the condition's last transition.", + "type": "string" + }, + "status": { + "description": "`status` is the status of the condition. Can be True, False, Unknown. Required.", + "type": "string" + }, + "type": { + "description": "`type` is the type of the condition. Required.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationList": { + "description": "PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "`items` is a list of request-priorities.", + "items": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.PriorityLevelConfiguration" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "PriorityLevelConfigurationList", + "version": "v1" + } + ] + }, + "io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationReference": { + "description": "PriorityLevelConfigurationReference contains information that points to the \"request-priority\" being used.", + "properties": { + "name": { + "description": "`name` is the name of the priority level configuration being referenced. Required.", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationSpec": { + "description": "PriorityLevelConfigurationSpec specifies the configuration of a priority level.", + "properties": { + "exempt": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.ExemptPriorityLevelConfiguration", + "description": "`exempt` specifies how requests are handled for an exempt priority level. This field MUST be empty if `type` is `\"Limited\"`. This field MAY be non-empty if `type` is `\"Exempt\"`. If empty and `type` is `\"Exempt\"` then the default values for `ExemptPriorityLevelConfiguration` apply." + }, + "limited": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.LimitedPriorityLevelConfiguration", + "description": "`limited` specifies how requests are handled for a Limited priority level. This field must be non-empty if and only if `type` is `\"Limited\"`." + }, + "type": { + "description": "`type` indicates whether this priority level is subject to limitation on request execution. A value of `\"Exempt\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\"Limited\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level.
Required.", + "type": "string" + } + }, + "required": [ + "type" + ], + "type": "object", + "x-kubernetes-unions": [ + { + "discriminator": "type", + "fields-to-discriminateBy": { + "exempt": "Exempt", + "limited": "Limited" + } + } + ] + }, + "io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationStatus": { + "description": "PriorityLevelConfigurationStatus represents the current state of a \"request-priority\".", + "properties": { + "conditions": { + "description": "`conditions` is the current state of \"request-priority\".", + "items": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationCondition" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + } + }, + "type": "object" + }, + "io.k8s.api.flowcontrol.v1.QueuingConfiguration": { + "description": "QueuingConfiguration holds the configuration parameters for queuing", + "properties": { + "handSize": { + "description": "`handSize` is a small positive number that configures the shuffle sharding of requests into queues. When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. This field has a default value of 8.", + "format": "int32", + "type": "integer" + }, + "queueLengthLimit": { + "description": "`queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. If not specified, it will be defaulted to 50.", + "format": "int32", + "type": "integer" + }, + "queues": { + "description": "`queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. Setting it to 1 effectively precludes shufflesharding and thus makes the distinguisher method of associated flow schemas irrelevant. This field has a default value of 64.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "io.k8s.api.flowcontrol.v1.ResourcePolicyRule": { + "description": "ResourcePolicyRule is a predicate that matches some resource requests, testing the request's verb and the target resource. A ResourcePolicyRule matches a resource request if and only if: (a) at least one member of verbs matches the request, (b) at least one member of apiGroups matches the request, (c) at least one member of resources matches the request, and (d) either (d1) the request does not specify a namespace (i.e., `Namespace==\"\"`) and clusterScope is true or (d2) the request specifies a namespace and least one member of namespaces matches the request's namespace.", + "properties": { + "apiGroups": { + "description": "`apiGroups` is a list of matching API groups and may not be empty. \"*\" matches all API groups and, if present, must be the only entry. 
Required.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "set" + }, + "clusterScope": { + "description": "`clusterScope` indicates whether to match requests that do not specify a namespace (which happens either because the resource is not namespaced or the request targets all namespaces). If this field is omitted or false then the `namespaces` field must contain a non-empty list.", + "type": "boolean" + }, + "namespaces": { + "description": "`namespaces` is a list of target namespaces that restricts matches. A request that specifies a target namespace matches only if either (a) this list contains that target namespace or (b) this list contains \"*\". Note that \"*\" matches any specified namespace but does not match a request that _does not specify_ a namespace (see the `clusterScope` field for that). This list may be empty, but only if `clusterScope` is true.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "set" + }, + "resources": { + "description": "`resources` is a list of matching resources (i.e., lowercase and plural) with, if desired, subresource. For example, [ \"services\", \"nodes/status\" ]. This list may not be empty. \"*\" matches all resources and, if present, must be the only entry. Required.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "set" + }, + "verbs": { + "description": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs and, if present, must be the only entry. Required.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "set" + } + }, + "required": [ + "verbs", + "apiGroups", + "resources" + ], + "type": "object" + }, + "io.k8s.api.flowcontrol.v1.ServiceAccountSubject": { + "description": "ServiceAccountSubject holds detailed information for service-account-kind subject.", + "properties": { + "name": { + "description": "`name` is the name of matching ServiceAccount objects, or \"*\" to match regardless of name. Required.", + "type": "string" + }, + "namespace": { + "description": "`namespace` is the namespace of matching ServiceAccount objects. Required.", + "type": "string" + } + }, + "required": [ + "namespace", + "name" + ], + "type": "object" + }, + "io.k8s.api.flowcontrol.v1.Subject": { + "description": "Subject matches the originator of a request, as identified by the request authentication system. There are three ways of matching an originator; by user, group, or service account.", + "properties": { + "group": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.GroupSubject", + "description": "`group` matches based on user group name." + }, + "kind": { + "description": "`kind` indicates which one of the other fields is non-empty. Required", + "type": "string" + }, + "serviceAccount": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.ServiceAccountSubject", + "description": "`serviceAccount` matches ServiceAccounts." + }, + "user": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.UserSubject", + "description": "`user` matches based on username." 
+ } + }, + "required": [ + "kind" + ], + "type": "object", + "x-kubernetes-unions": [ + { + "discriminator": "kind", + "fields-to-discriminateBy": { + "group": "Group", + "serviceAccount": "ServiceAccount", + "user": "User" + } + } + ] + }, + "io.k8s.api.flowcontrol.v1.UserSubject": { + "description": "UserSubject holds detailed information for user-kind subject.", + "properties": { + "name": { + "description": "`name` is the username that matches, or \"*\" to match all usernames. Required.", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "io.k8s.api.flowcontrol.v1beta3.ExemptPriorityLevelConfiguration": { + "description": "ExemptPriorityLevelConfiguration describes the configurable aspects of the handling of exempt requests. In the mandatory exempt configuration object the values in the fields here can be modified by authorized users, unlike the rest of the `spec`.", + "properties": { + "lendablePercent": { + "description": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )", + "format": "int32", + "type": "integer" + }, + "nominalConcurrencyShares": { + "description": "`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats nominally reserved for this priority level. This DOES NOT limit the dispatching from this priority level but affects the other priority levels through the borrowing mechanism. The server's concurrency limit (ServerCL) is divided among all the priority levels in proportion to their NCS values:\n\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\n\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of zero.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "io.k8s.api.flowcontrol.v1beta3.FlowDistinguisherMethod": { + "description": "FlowDistinguisherMethod specifies the method of a flow distinguisher.", + "properties": { + "type": { + "description": "`type` is the type of flow distinguisher method. The supported types are \"ByUser\" and \"ByNamespace\". Required.", + "type": "string" + } + }, + "required": [ + "type" + ], + "type": "object" + }, + "io.k8s.api.flowcontrol.v1beta3.FlowSchema": { + "description": "FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with similar attributes and is identified by a pair of strings: the name of the FlowSchema and a \"flow distinguisher\".", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.FlowSchemaSpec", + "description": "`spec` is the specification of the desired behavior of a FlowSchema. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + }, + "status": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.FlowSchemaStatus", + "description": "`status` is the current status of a FlowSchema. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "FlowSchema", + "version": "v1beta3" + } + ] + }, + "io.k8s.api.flowcontrol.v1beta3.FlowSchemaCondition": { + "description": "FlowSchemaCondition describes conditions for a FlowSchema.", + "properties": { + "lastTransitionTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "`lastTransitionTime` is the last time the condition transitioned from one status to another." + }, + "message": { + "description": "`message` is a human-readable message indicating details about last transition.", + "type": "string" + }, + "reason": { + "description": "`reason` is a unique, one-word, CamelCase reason for the condition's last transition.", + "type": "string" + }, + "status": { + "description": "`status` is the status of the condition. Can be True, False, Unknown. Required.", + "type": "string" + }, + "type": { + "description": "`type` is the type of the condition. Required.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.flowcontrol.v1beta3.FlowSchemaList": { + "description": "FlowSchemaList is a list of FlowSchema objects.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "`items` is a list of FlowSchemas.", + "items": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.FlowSchema" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "`metadata` is the standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "FlowSchemaList", + "version": "v1beta3" + } + ] + }, + "io.k8s.api.flowcontrol.v1beta3.FlowSchemaSpec": { + "description": "FlowSchemaSpec describes what the FlowSchema's specification looks like.", + "properties": { + "distinguisherMethod": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.FlowDistinguisherMethod", + "description": "`distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema. `nil` specifies that the distinguisher is disabled and thus will always be the empty string." + }, + "matchingPrecedence": { + "description": "`matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen FlowSchema is among those with the numerically lowest (which we take to be logically highest) MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000]. Note that if the precedence is not specified, it will be set to 1000 as default.", + "format": "int32", + "type": "integer" + }, + "priorityLevelConfiguration": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationReference", + "description": "`priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot be resolved, the FlowSchema will be ignored and marked as invalid in its status. Required." + }, + "rules": { + "description": "`rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if at least one member of rules matches the request. If it is an empty slice, there will be no requests matching the FlowSchema.", + "items": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.PolicyRulesWithSubjects" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "required": [ + "priorityLevelConfiguration" + ], + "type": "object" + }, + "io.k8s.api.flowcontrol.v1beta3.FlowSchemaStatus": { + "description": "FlowSchemaStatus represents the current state of a FlowSchema.", + "properties": { + "conditions": { + "description": "`conditions` is a list of the current states of FlowSchema.", + "items": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.FlowSchemaCondition" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + } + }, + "type": "object" + }, + "io.k8s.api.flowcontrol.v1beta3.GroupSubject": { + "description": "GroupSubject holds detailed information for group-kind subject.", + "properties": { + "name": { + "description": "name is the user group that matches, or \"*\" to match all user groups. See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some well-known group names. Required.", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "io.k8s.api.flowcontrol.v1beta3.LimitResponse": { + "description": "LimitResponse defines how to handle requests that can not be executed right now.", + "properties": { + "queuing": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.QueuingConfiguration", + "description": "`queuing` holds the configuration parameters for queuing.
This field may be non-empty only if `type` is `\"Queue\"`." + }, + "type": { + "description": "`type` is \"Queue\" or \"Reject\". \"Queue\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \"Reject\" means that requests that can not be executed upon arrival are rejected. Required.", + "type": "string" + } + }, + "required": [ + "type" + ], + "type": "object", + "x-kubernetes-unions": [ + { + "discriminator": "type", + "fields-to-discriminateBy": { + "queuing": "Queuing" + } + } + ] + }, + "io.k8s.api.flowcontrol.v1beta3.LimitedPriorityLevelConfiguration": { + "description": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?", + "properties": { + "borrowingLimitPercent": { + "description": "`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\n\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\n\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.", + "format": "int32", + "type": "integer" + }, + "lendablePercent": { + "description": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )", + "format": "int32", + "type": "integer" + }, + "limitResponse": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.LimitResponse", + "description": "`limitResponse` indicates what to do with requests that can not be executed right now" + }, + "nominalConcurrencyShares": { + "description": "`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats available at this priority level. This is used both for requests dispatched from this priority level as well as requests dispatched from other priority levels borrowing seats from this level. The server's concurrency limit (ServerCL) is divided among the Limited priority levels in proportion to their NCS values:\n\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\n\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. 
This field has a default value of 30.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "io.k8s.api.flowcontrol.v1beta3.NonResourcePolicyRule": { + "description": "NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member of verbs matches the request and (b) at least one member of nonResourceURLs matches the request.", + "properties": { + "nonResourceURLs": { + "description": "`nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. For example:\n - \"/healthz\" is legal\n - \"/hea*\" is illegal\n - \"/hea\" is legal but matches nothing\n - \"/hea/*\" also matches nothing\n - \"/healthz/*\" matches all per-component health checks.\n\"*\" matches all non-resource urls. If it is present, it must be the only entry. Required.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "set" + }, + "verbs": { + "description": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs. If it is present, it must be the only entry. Required.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "set" + } + }, + "required": [ + "verbs", + "nonResourceURLs" + ], + "type": "object" + }, + "io.k8s.api.flowcontrol.v1beta3.PolicyRulesWithSubjects": { + "description": "PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member of resourceRules or nonResourceRules matches the request.", + "properties": { + "nonResourceRules": { + "description": "`nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb and the target non-resource URL.", + "items": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.NonResourcePolicyRule" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "resourceRules": { + "description": "`resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the target resource. At least one of `resourceRules` and `nonResourceRules` has to be non-empty.", + "items": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.ResourcePolicyRule" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "subjects": { + "description": "subjects is the list of normal users, serviceaccounts, or groups that this rule cares about. There must be at least one member in this slice. A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. Required.", + "items": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.Subject" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "required": [ + "subjects" + ], + "type": "object" + }, + "io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfiguration": { + "description": "PriorityLevelConfiguration represents the configuration of a priority level.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationSpec", + "description": "`spec` is the specification of the desired behavior of a \"request-priority\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + }, + "status": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationStatus", + "description": "`status` is the current status of a \"request-priority\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "PriorityLevelConfiguration", + "version": "v1beta3" + } + ] + }, + "io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationCondition": { + "description": "PriorityLevelConfigurationCondition defines the condition of priority level.", + "properties": { + "lastTransitionTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "`lastTransitionTime` is the last time the condition transitioned from one status to another." + }, + "message": { + "description": "`message` is a human-readable message indicating details about last transition.", + "type": "string" + }, + "reason": { + "description": "`reason` is a unique, one-word, CamelCase reason for the condition's last transition.", + "type": "string" + }, + "status": { + "description": "`status` is the status of the condition. Can be True, False, Unknown. Required.", + "type": "string" + }, + "type": { + "description": "`type` is the type of the condition. Required.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationList": { + "description": "PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "`items` is a list of request-priorities.", + "items": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfiguration" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "PriorityLevelConfigurationList", + "version": "v1beta3" + } + ] + }, + "io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationReference": { + "description": "PriorityLevelConfigurationReference contains information that points to the \"request-priority\" being used.", + "properties": { + "name": { + "description": "`name` is the name of the priority level configuration being referenced. Required.", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationSpec": { + "description": "PriorityLevelConfigurationSpec specifies the configuration of a priority level.", + "properties": { + "exempt": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.ExemptPriorityLevelConfiguration", + "description": "`exempt` specifies how requests are handled for an exempt priority level. This field MUST be empty if `type` is `\"Limited\"`. This field MAY be non-empty if `type` is `\"Exempt\"`. If empty and `type` is `\"Exempt\"` then the default values for `ExemptPriorityLevelConfiguration` apply." + }, + "limited": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.LimitedPriorityLevelConfiguration", + "description": "`limited` specifies how requests are handled for a Limited priority level. This field must be non-empty if and only if `type` is `\"Limited\"`." + }, + "type": { + "description": "`type` indicates whether this priority level is subject to limitation on request execution. A value of `\"Exempt\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\"Limited\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. 
Required.", + "type": "string" + } + }, + "required": [ + "type" + ], + "type": "object", + "x-kubernetes-unions": [ + { + "discriminator": "type", + "fields-to-discriminateBy": { + "exempt": "Exempt", + "limited": "Limited" + } + } + ] + }, + "io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationStatus": { + "description": "PriorityLevelConfigurationStatus represents the current state of a \"request-priority\".", + "properties": { + "conditions": { + "description": "`conditions` is the current state of \"request-priority\".", + "items": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationCondition" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + } + }, + "type": "object" + }, + "io.k8s.api.flowcontrol.v1beta3.QueuingConfiguration": { + "description": "QueuingConfiguration holds the configuration parameters for queuing", + "properties": { + "handSize": { + "description": "`handSize` is a small positive number that configures the shuffle sharding of requests into queues. When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. This field has a default value of 8.", + "format": "int32", + "type": "integer" + }, + "queueLengthLimit": { + "description": "`queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. If not specified, it will be defaulted to 50.", + "format": "int32", + "type": "integer" + }, + "queues": { + "description": "`queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. Setting it to 1 effectively precludes shufflesharding and thus makes the distinguisher method of associated flow schemas irrelevant. This field has a default value of 64.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "io.k8s.api.flowcontrol.v1beta3.ResourcePolicyRule": { + "description": "ResourcePolicyRule is a predicate that matches some resource requests, testing the request's verb and the target resource. A ResourcePolicyRule matches a resource request if and only if: (a) at least one member of verbs matches the request, (b) at least one member of apiGroups matches the request, (c) at least one member of resources matches the request, and (d) either (d1) the request does not specify a namespace (i.e., `Namespace==\"\"`) and clusterScope is true or (d2) the request specifies a namespace and least one member of namespaces matches the request's namespace.", + "properties": { + "apiGroups": { + "description": "`apiGroups` is a list of matching API groups and may not be empty. \"*\" matches all API groups and, if present, must be the only entry. 
Required.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "set" + }, + "clusterScope": { + "description": "`clusterScope` indicates whether to match requests that do not specify a namespace (which happens either because the resource is not namespaced or the request targets all namespaces). If this field is omitted or false then the `namespaces` field must contain a non-empty list.", + "type": "boolean" + }, + "namespaces": { + "description": "`namespaces` is a list of target namespaces that restricts matches. A request that specifies a target namespace matches only if either (a) this list contains that target namespace or (b) this list contains \"*\". Note that \"*\" matches any specified namespace but does not match a request that _does not specify_ a namespace (see the `clusterScope` field for that). This list may be empty, but only if `clusterScope` is true.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "set" + }, + "resources": { + "description": "`resources` is a list of matching resources (i.e., lowercase and plural) with, if desired, subresource. For example, [ \"services\", \"nodes/status\" ]. This list may not be empty. \"*\" matches all resources and, if present, must be the only entry. Required.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "set" + }, + "verbs": { + "description": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs and, if present, must be the only entry. Required.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "set" + } + }, + "required": [ + "verbs", + "apiGroups", + "resources" + ], + "type": "object" + }, + "io.k8s.api.flowcontrol.v1beta3.ServiceAccountSubject": { + "description": "ServiceAccountSubject holds detailed information for service-account-kind subject.", + "properties": { + "name": { + "description": "`name` is the name of matching ServiceAccount objects, or \"*\" to match regardless of name. Required.", + "type": "string" + }, + "namespace": { + "description": "`namespace` is the namespace of matching ServiceAccount objects. Required.", + "type": "string" + } + }, + "required": [ + "namespace", + "name" + ], + "type": "object" + }, + "io.k8s.api.flowcontrol.v1beta3.Subject": { + "description": "Subject matches the originator of a request, as identified by the request authentication system. There are three ways of matching an originator; by user, group, or service account.", + "properties": { + "group": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.GroupSubject", + "description": "`group` matches based on user group name." + }, + "kind": { + "description": "`kind` indicates which one of the other fields is non-empty. Required", + "type": "string" + }, + "serviceAccount": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.ServiceAccountSubject", + "description": "`serviceAccount` matches ServiceAccounts." + }, + "user": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.UserSubject", + "description": "`user` matches based on username." 
+ } + }, + "required": [ + "kind" + ], + "type": "object", + "x-kubernetes-unions": [ + { + "discriminator": "kind", + "fields-to-discriminateBy": { + "group": "Group", + "serviceAccount": "ServiceAccount", + "user": "User" + } + } + ] + }, + "io.k8s.api.flowcontrol.v1beta3.UserSubject": { + "description": "UserSubject holds detailed information for user-kind subject.", + "properties": { + "name": { + "description": "`name` is the username that matches, or \"*\" to match all usernames. Required.", + "type": "string" + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "io.k8s.api.networking.v1.HTTPIngressPath": { + "description": "HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.", + "properties": { + "backend": { + "$ref": "#/definitions/io.k8s.api.networking.v1.IngressBackend", + "description": "backend defines the referenced service endpoint to which the traffic will be forwarded." + }, + "path": { + "description": "path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value \"Exact\" or \"Prefix\".", + "type": "string" + }, + "pathType": { + "description": "pathType determines the interpretation of the path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers to the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if p is an element-wise prefix of the request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types.", + "type": "string" + } + }, + "required": [ + "pathType", + "backend" + ], + "type": "object" + }, + "io.k8s.api.networking.v1.HTTPIngressRuleValue": { + "description": "HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http://\u003chost\u003e/\u003cpath\u003e?\u003csearchpart\u003e -\u003e backend where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.", + "properties": { + "paths": { + "description": "paths is a collection of paths that map requests to backends.", + "items": { + "$ref": "#/definitions/io.k8s.api.networking.v1.HTTPIngressPath" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "required": [ + "paths" + ], + "type": "object" + }, + "io.k8s.api.networking.v1.IPBlock": { + "description": "IPBlock describes a particular CIDR (Ex. \"192.168.1.0/24\",\"2001:db8::/64\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. 
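A minimal sketch of the pathType semantics described above, written as an HTTPIngressRuleValue fragment; the backend service name and port are hypothetical:

```yaml
# Hypothetical HTTPIngressRuleValue fragment illustrating pathType.
paths:
  - path: /foo/bar
    pathType: Exact        # matches /foo/bar only, not /foo/bar/baz
    backend:
      service:
        name: example-svc  # hypothetical backend Service
        port:
          number: 80
  - path: /foo/bar
    pathType: Prefix       # matches /foo/bar and /foo/bar/baz, but not /foo/barbaz
    backend:
      service:
        name: example-svc
        port:
          number: 80
```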
The except entry describes CIDRs that should not be included within this rule.", + "properties": { + "cidr": { + "description": "cidr is a string representing the IPBlock. Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\".", + "type": "string" + }, + "except": { + "description": "except is a slice of CIDRs that should not be included within an IPBlock. Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\". Except values will be rejected if they are outside the cidr range.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "cidr" + ], + "type": "object" + }, + "io.k8s.api.networking.v1.Ingress": { + "description": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.networking.v1.IngressSpec", + "description": "spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + }, + "status": { + "$ref": "#/definitions/io.k8s.api.networking.v1.IngressStatus", + "description": "status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "networking.k8s.io", + "kind": "Ingress", + "version": "v1" + } + ] + }, + "io.k8s.api.networking.v1.IngressBackend": { + "description": "IngressBackend describes all endpoints for a given service and port.", + "properties": { + "resource": { + "$ref": "#/definitions/io.k8s.api.core.v1.TypedLocalObjectReference", + "description": "resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, a service.Name and service.Port must not be specified. This is a mutually exclusive setting with \"Service\"." + }, + "service": { + "$ref": "#/definitions/io.k8s.api.networking.v1.IngressServiceBackend", + "description": "service references a service as a backend. This is a mutually exclusive setting with \"Resource\"." + } + }, + "type": "object" + }, + "io.k8s.api.networking.v1.IngressClass": { + "description": "IngressClass represents the class of the Ingress, referenced by the Ingress Spec. 
The `ingressclass.kubernetes.io/is-default-class` annotation can be used to indicate that an IngressClass should be considered default. When a single IngressClass resource has this annotation set to true, new Ingress resources without a class specified will be assigned this default class.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.networking.v1.IngressClassSpec", + "description": "spec is the desired state of the IngressClass. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "networking.k8s.io", + "kind": "IngressClass", + "version": "v1" + } + ] + }, + "io.k8s.api.networking.v1.IngressClassList": { + "description": "IngressClassList is a collection of IngressClasses.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of IngressClasses.", + "items": { + "$ref": "#/definitions/io.k8s.api.networking.v1.IngressClass" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata." + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "networking.k8s.io", + "kind": "IngressClassList", + "version": "v1" + } + ] + }, + "io.k8s.api.networking.v1.IngressClassParametersReference": { + "description": "IngressClassParametersReference identifies an API object. This can be used to specify a cluster or namespace-scoped resource.", + "properties": { + "apiGroup": { + "description": "apiGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. 
For any other third-party types, APIGroup is required.", + "type": "string" + }, + "kind": { + "description": "kind is the type of resource being referenced.", + "type": "string" + }, + "name": { + "description": "name is the name of resource being referenced.", + "type": "string" + }, + "namespace": { + "description": "namespace is the namespace of the resource being referenced. This field is required when scope is set to \"Namespace\" and must be unset when scope is set to \"Cluster\".", + "type": "string" + }, + "scope": { + "description": "scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\".", + "type": "string" + } + }, + "required": [ + "kind", + "name" + ], + "type": "object" + }, + "io.k8s.api.networking.v1.IngressClassSpec": { + "description": "IngressClassSpec provides information about the class of an Ingress.", + "properties": { + "controller": { + "description": "controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable.", + "type": "string" + }, + "parameters": { + "$ref": "#/definitions/io.k8s.api.networking.v1.IngressClassParametersReference", + "description": "parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters." + } + }, + "type": "object" + }, + "io.k8s.api.networking.v1.IngressList": { + "description": "IngressList is a collection of Ingress.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of Ingress.", + "items": { + "$ref": "#/definitions/io.k8s.api.networking.v1.Ingress" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard object's metadata. 
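A hypothetical IngressClass tying together the controller name, parameters reference, and default-class annotation described above; every name here is invented for illustration:

```yaml
# Hypothetical IngressClass with namespace-scoped parameters.
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
  name: example-class
  annotations:
    ingressclass.kubernetes.io/is-default-class: "true"  # marks this class as the cluster default
spec:
  controller: example.com/ingress-controller  # hypothetical domain-prefixed controller name
  parameters:
    apiGroup: example.com       # hypothetical CRD group; omit for core-group kinds
    kind: IngressParameters     # hypothetical parameters kind
    name: external-config
    scope: Namespace            # Namespace scope requires `namespace` to be set
    namespace: ingress-system
```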
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "networking.k8s.io", + "kind": "IngressList", + "version": "v1" + } + ] + }, + "io.k8s.api.networking.v1.IngressLoadBalancerIngress": { + "description": "IngressLoadBalancerIngress represents the status of a load-balancer ingress point.", + "properties": { + "hostname": { + "description": "hostname is set for load-balancer ingress points that are DNS based.", + "type": "string" + }, + "ip": { + "description": "ip is set for load-balancer ingress points that are IP based.", + "type": "string" + }, + "ports": { + "description": "ports provides information about the ports exposed by this LoadBalancer.", + "items": { + "$ref": "#/definitions/io.k8s.api.networking.v1.IngressPortStatus" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "type": "object" + }, + "io.k8s.api.networking.v1.IngressLoadBalancerStatus": { + "description": "IngressLoadBalancerStatus represents the status of a load-balancer.", + "properties": { + "ingress": { + "description": "ingress is a list containing ingress points for the load-balancer.", + "items": { + "$ref": "#/definitions/io.k8s.api.networking.v1.IngressLoadBalancerIngress" + }, + "type": "array" + } + }, + "type": "object" + }, + "io.k8s.api.networking.v1.IngressPortStatus": { + "description": "IngressPortStatus represents the error condition of a service port.", + "properties": { + "error": { + "description": "error is to record the problem with the service port. The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.", + "type": "string" + }, + "port": { + "description": "port is the port number of the ingress port.", + "format": "int32", + "type": "integer" + }, + "protocol": { + "description": "protocol is the protocol of the ingress port. The supported values are: \"TCP\", \"UDP\", \"SCTP\"", + "type": "string" + } + }, + "required": [ + "port", + "protocol" + ], + "type": "object" + }, + "io.k8s.api.networking.v1.IngressRule": { + "description": "IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.", + "properties": { + "host": { + "description": "host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nhost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). 
The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If host is precise, the request matches this rule if the http host header is equal to Host. 2. If host is a wildcard, then the request matches this rule if the http host header is equal to the suffix (removing the first label) of the wildcard rule.", + "type": "string" + }, + "http": { + "$ref": "#/definitions/io.k8s.api.networking.v1.HTTPIngressRuleValue" + } + }, + "type": "object" + }, + "io.k8s.api.networking.v1.IngressServiceBackend": { + "description": "IngressServiceBackend references a Kubernetes Service as a Backend.", + "properties": { + "name": { + "description": "name is the referenced service. The service must exist in the same namespace as the Ingress object.", + "type": "string" + }, + "port": { + "$ref": "#/definitions/io.k8s.api.networking.v1.ServiceBackendPort", + "description": "port of the referenced service. A port name or port number is required for an IngressServiceBackend." + } + }, + "required": [ + "name" + ], + "type": "object" + }, + "io.k8s.api.networking.v1.IngressSpec": { + "description": "IngressSpec describes the Ingress the user wishes to exist.", + "properties": { + "defaultBackend": { + "$ref": "#/definitions/io.k8s.api.networking.v1.IngressBackend", + "description": "defaultBackend is the backend that should handle requests that don't match any rule. If Rules are not specified, DefaultBackend must be specified. If DefaultBackend is not set, the handling of requests that do not match any of the rules will be up to the Ingress controller." + }, + "ingressClassName": { + "description": "ingressClassName is the name of an IngressClass cluster resource. Ingress controller implementations use this field to know whether they should be serving this Ingress resource, by a transitive connection (controller -\u003e IngressClass -\u003e Ingress resource). Although the `kubernetes.io/ingress.class` annotation (simple constant name) was never formally defined, it was widely supported by Ingress controllers to create a direct binding between Ingress controller and Ingress resources. Newly created Ingress resources should prefer using the field. However, even though the annotation is officially deprecated, for backwards compatibility reasons, ingress controllers should still honor that annotation if present.", + "type": "string" + }, + "rules": { + "description": "rules is a list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", + "items": { + "$ref": "#/definitions/io.k8s.api.networking.v1.IngressRule" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "tls": { + "description": "tls represents the TLS configuration. Currently the Ingress only supports a single TLS port, 443. 
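Putting the IngressSpec fields above together, a hedged sketch of a complete Ingress; all names are hypothetical:

```yaml
# Hypothetical Ingress combining ingressClassName, a wildcard host rule, and TLS.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: example-ingress
spec:
  ingressClassName: example-class     # binds this Ingress to an IngressClass resource
  tls:
    - hosts:
        - "*.foo.com"                 # must match the names in the certificate
      secretName: foo-wildcard-tls    # hypothetical TLS secret
  rules:
    - host: "*.foo.com"               # wildcard: matches bar.foo.com, not baz.bar.foo.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: example-svc
                port:
                  number: 8080
  defaultBackend:                     # handles requests that match no rule
    service:
      name: fallback-svc
      port:
        number: 80
```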
If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", + "items": { + "$ref": "#/definitions/io.k8s.api.networking.v1.IngressTLS" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "type": "object" + }, + "io.k8s.api.networking.v1.IngressStatus": { + "description": "IngressStatus describes the current state of the Ingress.", + "properties": { + "loadBalancer": { + "$ref": "#/definitions/io.k8s.api.networking.v1.IngressLoadBalancerStatus", + "description": "loadBalancer contains the current status of the load-balancer." + } + }, + "type": "object" + }, + "io.k8s.api.networking.v1.IngressTLS": { + "description": "IngressTLS describes the transport layer security associated with an ingress.", + "properties": { + "hosts": { + "description": "hosts is a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "secretName": { + "description": "secretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the \"Host\" header is used for routing.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.networking.v1.NetworkPolicy": { + "description": "NetworkPolicy describes what network traffic is allowed for a set of Pods", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.networking.v1.NetworkPolicySpec", + "description": "spec represents the specification of the desired behavior for this NetworkPolicy." + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "networking.k8s.io", + "kind": "NetworkPolicy", + "version": "v1" + } + ] + }, + "io.k8s.api.networking.v1.NetworkPolicyEgressRule": { + "description": "NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. 
This type is beta-level in 1.8", + "properties": { + "ports": { + "description": "ports is a list of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", + "items": { + "$ref": "#/definitions/io.k8s.api.networking.v1.NetworkPolicyPort" + }, + "type": "array" + }, + "to": { + "description": "to is a list of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list.", + "items": { + "$ref": "#/definitions/io.k8s.api.networking.v1.NetworkPolicyPeer" + }, + "type": "array" + } + }, + "type": "object" + }, + "io.k8s.api.networking.v1.NetworkPolicyIngressRule": { + "description": "NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from.", + "properties": { + "from": { + "description": "from is a list of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list.", + "items": { + "$ref": "#/definitions/io.k8s.api.networking.v1.NetworkPolicyPeer" + }, + "type": "array" + }, + "ports": { + "description": "ports is a list of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", + "items": { + "$ref": "#/definitions/io.k8s.api.networking.v1.NetworkPolicyPort" + }, + "type": "array" + } + }, + "type": "object" + }, + "io.k8s.api.networking.v1.NetworkPolicyList": { + "description": "NetworkPolicyList is a list of NetworkPolicy objects.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is a list of schema objects.", + "items": { + "$ref": "#/definitions/io.k8s.api.networking.v1.NetworkPolicy" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "networking.k8s.io", + "kind": "NetworkPolicyList", + "version": "v1" + } + ] + }, + "io.k8s.api.networking.v1.NetworkPolicyPeer": { + "description": "NetworkPolicyPeer describes a peer to allow traffic to/from. Only certain combinations of fields are allowed", + "properties": { + "ipBlock": { + "$ref": "#/definitions/io.k8s.api.networking.v1.IPBlock", + "description": "ipBlock defines policy on a particular IPBlock. If this field is set then neither of the other fields can be." + }, + "namespaceSelector": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", + "description": "namespaceSelector selects namespaces using cluster-scoped labels. This field follows standard label selector semantics; if present but empty, it selects all namespaces.\n\nIf podSelector is also set, then the NetworkPolicyPeer as a whole selects the pods matching podSelector in the namespaces selected by namespaceSelector. Otherwise it selects all pods in the namespaces selected by namespaceSelector." + }, + "podSelector": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", + "description": "podSelector is a label selector which selects pods. This field follows standard label selector semantics; if present but empty, it selects all pods.\n\nIf namespaceSelector is also set, then the NetworkPolicyPeer as a whole selects the pods matching podSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects the pods matching podSelector in the policy's own namespace." + } + }, + "type": "object" + }, + "io.k8s.api.networking.v1.NetworkPolicyPort": { + "description": "NetworkPolicyPort describes a port to allow traffic on", + "properties": { + "endPort": { + "description": "endPort indicates that the range of ports from port to endPort, inclusive, should be allowed by the policy if set. This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal to or greater than port.", + "format": "int32", + "type": "integer" + }, + "port": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString", + "description": "port represents the port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched." + }, + "protocol": { + "description": "protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.networking.v1.NetworkPolicySpec": { + "description": "NetworkPolicySpec provides the specification of a NetworkPolicy", + "properties": { + "egress": { + "description": "egress is a list of egress rules to be applied to the selected pods. 
Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8", + "items": { + "$ref": "#/definitions/io.k8s.api.networking.v1.NetworkPolicyEgressRule" + }, + "type": "array" + }, + "ingress": { + "description": "ingress is a list of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)", + "items": { + "$ref": "#/definitions/io.k8s.api.networking.v1.NetworkPolicyIngressRule" + }, + "type": "array" + }, + "podSelector": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", + "description": "podSelector selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace." + }, + "policyTypes": { + "description": "policyTypes is a list of rule types that the NetworkPolicy relates to. Valid options are [\"Ingress\"], [\"Egress\"], or [\"Ingress\", \"Egress\"]. If this field is not specified, it will default based on the existence of ingress or egress rules; policies that contain an egress section are assumed to affect egress, and all policies (whether or not they contain an ingress section) are assumed to affect ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that includes \"Egress\" (since such a policy would not include an egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "podSelector" + ], + "type": "object" + }, + "io.k8s.api.networking.v1.ServiceBackendPort": { + "description": "ServiceBackendPort is the service port being referenced.", + "properties": { + "name": { + "description": "name is the name of the port on the Service. This is a mutually exclusive setting with \"Number\".", + "type": "string" + }, + "number": { + "description": "number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \"Name\".", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "io.k8s.api.networking.v1alpha1.IPAddress": { + "description": "IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. 
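A sketch of a NetworkPolicy exercising the spec fields above; the labels, CIDRs, and ports are hypothetical:

```yaml
# Hypothetical NetworkPolicy combining podSelector, peers, port ranges, and policyTypes.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: example-policy
  namespace: default
spec:
  podSelector:
    matchLabels:
      app: web              # selects the pods this policy isolates
  policyTypes:
    - Ingress
    - Egress                # must be listed explicitly for the egress rules to restrict traffic
  ingress:
    - from:
        - ipBlock:
            cidr: 192.168.1.0/24
            except:
              - 192.168.1.128/25   # carve-out inside the allowed CIDR
      ports:
        - protocol: TCP
          port: 8000
          endPort: 8080     # allows the whole 8000-8080 range
  egress:
    - to:
        - namespaceSelector:
            matchLabels:
              team: backend  # all pods in namespaces carrying this label
```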
An IP address can be represented in different formats; to guarantee the uniqueness of the IP, the name of the object is the IP address in canonical format: four decimal octets separated by dots, suppressing leading zeros, for IPv4, and the representation defined by RFC 5952 for IPv6. Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 Invalid: 10.01.2.3 or 2001:db8:0:0:0::1", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.IPAddressSpec", + "description": "spec is the desired state of the IPAddress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "networking.k8s.io", + "kind": "IPAddress", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.networking.v1alpha1.IPAddressList": { + "description": "IPAddressList contains a list of IPAddress.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of IPAddresses.", + "items": { + "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.IPAddress" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "networking.k8s.io", + "kind": "IPAddressList", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.networking.v1alpha1.IPAddressSpec": { + "description": "IPAddressSpec describes the attributes in an IP Address.", + "properties": { + "parentRef": { + "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ParentReference", + "description": "ParentRef references the resource that an IPAddress is attached to. 
An IPAddress must reference a parent object." + } + }, + "type": "object" + }, + "io.k8s.api.networking.v1alpha1.ParentReference": { + "description": "ParentReference describes a reference to a parent object.", + "properties": { + "group": { + "description": "Group is the group of the object being referenced.", + "type": "string" + }, + "name": { + "description": "Name is the name of the object being referenced.", + "type": "string" + }, + "namespace": { + "description": "Namespace is the namespace of the object being referenced.", + "type": "string" + }, + "resource": { + "description": "Resource is the resource of the object being referenced.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.networking.v1alpha1.ServiceCIDR": { + "description": "ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). This range is used to allocate ClusterIPs to Service objects.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ServiceCIDRSpec", + "description": "spec is the desired state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + }, + "status": { + "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ServiceCIDRStatus", + "description": "status represents the current state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "networking.k8s.io", + "kind": "ServiceCIDR", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.networking.v1alpha1.ServiceCIDRList": { + "description": "ServiceCIDRList contains a list of ServiceCIDR objects.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of ServiceCIDRs.", + "items": { + "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ServiceCIDR" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
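An illustrative pairing of the two v1alpha1 objects above; note that the IPAddress name must be the address in canonical form, and the parentRef here is assumed, for the sake of the sketch, to point at a core-group Service:

```yaml
# Hypothetical ServiceCIDR and an IPAddress allocated from it.
apiVersion: networking.k8s.io/v1alpha1
kind: ServiceCIDR
metadata:
  name: example-cidr
spec:
  cidrs:                   # at most two entries, one per IP family; immutable
    - 10.96.0.0/16
    - 2001:db8::/64
---
apiVersion: networking.k8s.io/v1alpha1
kind: IPAddress
metadata:
  name: 10.96.1.5          # the name is the IP in canonical format
spec:
  parentRef:               # the object this IP is attached to (assumed: a Service)
    group: ""              # core API group
    resource: services
    namespace: default
    name: example-svc      # hypothetical Service name
```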
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "networking.k8s.io", + "kind": "ServiceCIDRList", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.networking.v1alpha1.ServiceCIDRSpec": { + "description": "ServiceCIDRSpec defines the CIDRs the user wants to use for allocating ClusterIPs for Services.", + "properties": { + "cidrs": { + "description": "CIDRs defines the IP blocks in CIDR notation (e.g. \"192.168.0.0/24\" or \"2001:db8::/64\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. This field is immutable.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "io.k8s.api.networking.v1alpha1.ServiceCIDRStatus": { + "description": "ServiceCIDRStatus describes the current state of the ServiceCIDR.", + "properties": { + "conditions": { + "description": "conditions holds an array of metav1.Condition that describe the current state of the ServiceCIDR.", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + } + }, + "type": "object" + }, + "io.k8s.api.node.v1.Overhead": { + "description": "Overhead structure represents the resource overhead associated with running a pod.", + "properties": { + "podFixed": { + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + }, + "description": "podFixed represents the fixed resource overhead associated with running a pod.", + "type": "object" + } + }, + "type": "object" + }, + "io.k8s.api.node.v1.RuntimeClass": { + "description": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://kubernetes.io/docs/concepts/containers/runtime-class/", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "handler": { + "description": "handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node \u0026 CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. 
The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "overhead": { + "$ref": "#/definitions/io.k8s.api.node.v1.Overhead", + "description": "overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see\n https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/" + }, + "scheduling": { + "$ref": "#/definitions/io.k8s.api.node.v1.Scheduling", + "description": "scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes." + } + }, + "required": [ + "handler" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "node.k8s.io", + "kind": "RuntimeClass", + "version": "v1" + } + ] + }, + "io.k8s.api.node.v1.RuntimeClassList": { + "description": "RuntimeClassList is a list of RuntimeClass objects.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is a list of schema objects.", + "items": { + "$ref": "#/definitions/io.k8s.api.node.v1.RuntimeClass" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "node.k8s.io", + "kind": "RuntimeClassList", + "version": "v1" + } + ] + }, + "io.k8s.api.node.v1.Scheduling": { + "description": "Scheduling specifies the scheduling constraints for nodes supporting a RuntimeClass.", + "properties": { + "nodeSelector": { + "additionalProperties": { + "type": "string" + }, + "description": "nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. 
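A hedged sketch of a RuntimeClass using the handler, overhead, and scheduling fields above; the handler name, node label, and taint key are hypothetical:

```yaml
# Hypothetical RuntimeClass with overhead accounting and scheduling constraints.
apiVersion: node.k8s.io/v1
kind: RuntimeClass
metadata:
  name: example-sandboxed
handler: runsc                             # hypothetical CRI handler; lowercase DNS label, immutable
overhead:
  podFixed:                                # fixed per-pod overhead added to resource accounting
    cpu: 250m
    memory: 120Mi
scheduling:
  nodeSelector:
    sandbox.example.com/enabled: "true"    # hypothetical node label; merged with the pod's own selector
  tolerations:
    - key: sandbox.example.com/dedicated   # hypothetical taint key; appended to pod tolerations
      operator: Exists
      effect: NoSchedule
```

Note that handler, overhead, and scheduling are top-level fields of RuntimeClass in node.k8s.io/v1, not nested under a spec.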
Any conflicts will cause the pod to be rejected in admission.", + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "tolerations": { + "description": "tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Toleration" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "type": "object" + }, + "io.k8s.api.policy.v1.Eviction": { + "description": "Eviction evicts a pod from its node subject to certain policies and safety constraints. This is a subresource of Pod. A request to cause such an eviction is created by POSTing to .../pods/\u003cpod name\u003e/evictions.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "deleteOptions": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions", + "description": "DeleteOptions may be provided" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "ObjectMeta describes the pod that is being evicted." + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "policy", + "kind": "Eviction", + "version": "v1" + } + ] + }, + "io.k8s.api.policy.v1.PodDisruptionBudget": { + "description": "PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.policy.v1.PodDisruptionBudgetSpec", + "description": "Specification of the desired behavior of the PodDisruptionBudget." + }, + "status": { + "$ref": "#/definitions/io.k8s.api.policy.v1.PodDisruptionBudgetStatus", + "description": "Most recently observed status of the PodDisruptionBudget." 
+ } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "policy", + "kind": "PodDisruptionBudget", + "version": "v1" + } + ] + }, + "io.k8s.api.policy.v1.PodDisruptionBudgetList": { + "description": "PodDisruptionBudgetList is a collection of PodDisruptionBudgets.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is a list of PodDisruptionBudgets", + "items": { + "$ref": "#/definitions/io.k8s.api.policy.v1.PodDisruptionBudget" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "policy", + "kind": "PodDisruptionBudgetList", + "version": "v1" + } + ] + }, + "io.k8s.api.policy.v1.PodDisruptionBudgetSpec": { + "description": "PodDisruptionBudgetSpec is a description of a PodDisruptionBudget.", + "properties": { + "maxUnavailable": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString", + "description": "An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with \"minAvailable\"." + }, + "minAvailable": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString", + "description": "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying \"100%\"." + }, + "selector": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", + "description": "Label query over pods whose evictions are managed by the disruption budget. A null selector will match no pods, while an empty ({}) selector will select all pods within the namespace.", + "x-kubernetes-patch-strategy": "replace" + }, + "unhealthyPodEvictionPolicy": { + "description": "UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. Current implementation considers healthy pods, as pods that have status.conditions item with type=\"Ready\",status=\"True\".\n\nValid policies are IfHealthyBudget and AlwaysAllow. 
If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\n\nIfHealthyBudget policy means that running pods (status.phase=\"Running\") that are not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction.\n\nAlwaysAllow policy means that all running pods (status.phase=\"Running\") that are not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB are met. This means prospective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\n\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.\n\nThis field is beta-level. The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default).", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.policy.v1.PodDisruptionBudgetStatus": { + "description": "PodDisruptionBudgetStatus represents information about the status of a PodDisruptionBudget. Status may trail the actual state of a system.", + "properties": { + "conditions": { + "description": "Conditions contain conditions for PDB. The disruption controller sets the DisruptionAllowed condition. The following are known values for the reason field (additional reasons could be added in the future): - SyncFailed: The controller encountered an error and wasn't able to compute\n the number of allowed disruptions. Therefore no disruptions are\n allowed and the status of the condition will be False.\n- InsufficientPods: The number of pods are either at or below the number\n required by the PodDisruptionBudget. No disruptions are\n allowed and the status of the condition will be False.\n- SufficientPods: There are more pods than required by the PodDisruptionBudget.\n The condition will be True, and the number of allowed\n disruptions are provided by the disruptionsAllowed property.", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "currentHealthy": { + "description": "current number of healthy pods", + "format": "int32", + "type": "integer" + }, + "desiredHealthy": { + "description": "minimum desired number of healthy pods", + "format": "int32", + "type": "integer" + }, + "disruptedPods": { + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "description": "DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there, it will be removed from the list automatically by the PodDisruptionBudget controller after some time. 
If everything goes smoothly, this map should be empty most of the time. A large number of entries in the map may indicate problems with pod deletions.", + "type": "object" + }, + "disruptionsAllowed": { + "description": "Number of pod disruptions that are currently allowed.", + "format": "int32", + "type": "integer" + }, + "expectedPods": { + "description": "total number of pods counted by this disruption budget", + "format": "int32", + "type": "integer" + }, + "observedGeneration": { + "description": "Most recent generation observed when updating this PDB status. DisruptionsAllowed and other status information is valid only if observedGeneration equals the PDB's object generation.", + "format": "int64", + "type": "integer" + } + }, + "required": [ + "disruptionsAllowed", + "currentHealthy", + "desiredHealthy", + "expectedPods" + ], + "type": "object" + }, + "io.k8s.api.rbac.v1.AggregationRule": { + "description": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + "properties": { + "clusterRoleSelectors": { + "description": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + }, + "type": "array" + } + }, + "type": "object" + }, + "io.k8s.api.rbac.v1.ClusterRole": { + "description": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", + "properties": { + "aggregationRule": { + "$ref": "#/definitions/io.k8s.api.rbac.v1.AggregationRule", + "description": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller." + }, + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata." + }, + "rules": { + "description": "Rules holds all the PolicyRules for this ClusterRole", + "items": { + "$ref": "#/definitions/io.k8s.api.rbac.v1.PolicyRule" + }, + "type": "array" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "rbac.authorization.k8s.io", + "kind": "ClusterRole", + "version": "v1" + } + ] + }, + "io.k8s.api.rbac.v1.ClusterRoleBinding": { + "description": "ClusterRoleBinding references a ClusterRole, but does not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata." + }, + "roleRef": { + "$ref": "#/definitions/io.k8s.api.rbac.v1.RoleRef", + "description": "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error. This field is immutable." + }, + "subjects": { + "description": "Subjects holds references to the objects the role applies to.", + "items": { + "$ref": "#/definitions/io.k8s.api.rbac.v1.Subject" + }, + "type": "array" + } + }, + "required": [ + "roleRef" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "rbac.authorization.k8s.io", + "kind": "ClusterRoleBinding", + "version": "v1" + } + ] + }, + "io.k8s.api.rbac.v1.ClusterRoleBindingList": { + "description": "ClusterRoleBindingList is a collection of ClusterRoleBindings", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is a list of ClusterRoleBindings", + "items": { + "$ref": "#/definitions/io.k8s.api.rbac.v1.ClusterRoleBinding" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard object's metadata." + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "rbac.authorization.k8s.io", + "kind": "ClusterRoleBindingList", + "version": "v1" + } + ] + }, + "io.k8s.api.rbac.v1.ClusterRoleList": { + "description": "ClusterRoleList is a collection of ClusterRoles", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is a list of ClusterRoles", + "items": { + "$ref": "#/definitions/io.k8s.api.rbac.v1.ClusterRole" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard object's metadata." + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "rbac.authorization.k8s.io", + "kind": "ClusterRoleList", + "version": "v1" + } + ] + }, + "io.k8s.api.rbac.v1.PolicyRule": { + "description": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", + "properties": { + "apiGroups": { + "description": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \"\" represents the core API group and \"*\" represents all API groups.", + "items": { + "type": "string" + }, + "type": "array" + }, + "nonResourceURLs": { + "description": "NonResourceURLs is a set of partial URLs that a user should have access to. *s are allowed, but only as the full, final step in the path. Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.", + "items": { + "type": "string" + }, + "type": "array" + }, + "resourceNames": { + "description": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", + "items": { + "type": "string" + }, + "type": "array" + }, + "resources": { + "description": "Resources is a list of resources this rule applies to. '*' represents all resources.", + "items": { + "type": "string" + }, + "type": "array" + }, + "verbs": { + "description": "Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "verbs" + ], + "type": "object" + }, + "io.k8s.api.rbac.v1.Role": { + "description": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata." 
+ }, + "rules": { + "description": "Rules holds all the PolicyRules for this Role", + "items": { + "$ref": "#/definitions/io.k8s.api.rbac.v1.PolicyRule" + }, + "type": "array" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "rbac.authorization.k8s.io", + "kind": "Role", + "version": "v1" + } + ] + }, + "io.k8s.api.rbac.v1.RoleBinding": { + "description": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata." + }, + "roleRef": { + "$ref": "#/definitions/io.k8s.api.rbac.v1.RoleRef", + "description": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error. This field is immutable." + }, + "subjects": { + "description": "Subjects holds references to the objects the role applies to.", + "items": { + "$ref": "#/definitions/io.k8s.api.rbac.v1.Subject" + }, + "type": "array" + } + }, + "required": [ + "roleRef" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "rbac.authorization.k8s.io", + "kind": "RoleBinding", + "version": "v1" + } + ] + }, + "io.k8s.api.rbac.v1.RoleBindingList": { + "description": "RoleBindingList is a collection of RoleBindings", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is a list of RoleBindings", + "items": { + "$ref": "#/definitions/io.k8s.api.rbac.v1.RoleBinding" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard object's metadata." 
+ } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "rbac.authorization.k8s.io", + "kind": "RoleBindingList", + "version": "v1" + } + ] + }, + "io.k8s.api.rbac.v1.RoleList": { + "description": "RoleList is a collection of Roles", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is a list of Roles", + "items": { + "$ref": "#/definitions/io.k8s.api.rbac.v1.Role" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard object's metadata." + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "rbac.authorization.k8s.io", + "kind": "RoleList", + "version": "v1" + } + ] + }, + "io.k8s.api.rbac.v1.RoleRef": { + "description": "RoleRef contains information that points to the role being used", + "properties": { + "apiGroup": { + "description": "APIGroup is the group for the resource being referenced", + "type": "string" + }, + "kind": { + "description": "Kind is the type of resource being referenced", + "type": "string" + }, + "name": { + "description": "Name is the name of resource being referenced", + "type": "string" + } + }, + "required": [ + "apiGroup", + "kind", + "name" + ], + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.rbac.v1.Subject": { + "description": "Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, or a value for non-objects such as user and group names.", + "properties": { + "apiGroup": { + "description": "APIGroup holds the API group of the referenced subject. Defaults to \"\" for ServiceAccount subjects. Defaults to \"rbac.authorization.k8s.io\" for User and Group subjects.", + "type": "string" + }, + "kind": { + "description": "Kind of object being referenced. Values defined by this API group are \"User\", \"Group\", and \"ServiceAccount\". If the Authorizer does not recognized the kind value, the Authorizer should report an error.", + "type": "string" + }, + "name": { + "description": "Name of the object being referenced.", + "type": "string" + }, + "namespace": { + "description": "Namespace of the referenced object. 
If the object kind is non-namespaced, such as \"User\" or \"Group\", and this value is not empty, the Authorizer should report an error.", + "type": "string" + } + }, + "required": [ + "kind", + "name" + ], + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.resource.v1alpha2.AllocationResult": { + "description": "AllocationResult contains attributes of an allocated resource.", + "properties": { + "availableOnNodes": { + "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelector", + "description": "This field will get set by the resource driver after it has allocated the resource to inform the scheduler where it can schedule Pods using the ResourceClaim.\n\nSetting this field is optional. If null, the resource is available everywhere." + }, + "resourceHandles": { + "description": "ResourceHandles contain the state associated with an allocation that should be maintained throughout the lifetime of a claim. Each ResourceHandle contains data that should be passed to a specific kubelet plugin once it lands on a node. This data is returned by the driver after a successful allocation and is opaque to Kubernetes. Driver documentation may explain to users how to interpret this data if needed.\n\nSetting this field is optional. It has a maximum size of 32 entries. If null (or empty), it is assumed this allocation will be processed by a single kubelet plugin with no ResourceHandle data attached. The name of the kubelet plugin invoked will match the DriverName set in the ResourceClaimStatus this AllocationResult is embedded in.", + "items": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceHandle" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "shareable": { + "description": "Shareable determines whether the resource supports more than one consumer at a time.", + "type": "boolean" + } + }, + "type": "object" + }, + "io.k8s.api.resource.v1alpha2.PodSchedulingContext": { + "description": "PodSchedulingContext objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingContextSpec", + "description": "Spec describes where resources for the Pod are needed." + }, + "status": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingContextStatus", + "description": "Status describes where resources for the Pod can be allocated." 
+ } + }, + "required": [ + "spec" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "resource.k8s.io", + "kind": "PodSchedulingContext", + "version": "v1alpha2" + } + ] + }, + "io.k8s.api.resource.v1alpha2.PodSchedulingContextList": { + "description": "PodSchedulingContextList is a collection of Pod scheduling objects.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is the list of PodSchedulingContext objects.", + "items": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingContext" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "resource.k8s.io", + "kind": "PodSchedulingContextList", + "version": "v1alpha2" + } + ] + }, + "io.k8s.api.resource.v1alpha2.PodSchedulingContextSpec": { + "description": "PodSchedulingContextSpec describes where resources for the Pod are needed.", + "properties": { + "potentialNodes": { + "description": "PotentialNodes lists nodes where the Pod might be able to run.\n\nThe size of this field is limited to 128. This is large enough for many clusters. Larger clusters may need more attempts to find a node that suits all pending resources. This may get increased in the future, but not reduced.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "selectedNode": { + "description": "SelectedNode is the node for which allocation of ResourceClaims that are referenced by the Pod and that use \"WaitForFirstConsumer\" allocation is to be attempted.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.resource.v1alpha2.PodSchedulingContextStatus": { + "description": "PodSchedulingContextStatus describes where resources for the Pod can be allocated.", + "properties": { + "resourceClaims": { + "description": "ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \"WaitForFirstConsumer\" allocation mode.", + "items": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimSchedulingStatus" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map" + } + }, + "type": "object" + }, + "io.k8s.api.resource.v1alpha2.ResourceClaim": { + "description": "ResourceClaim describes which resources are needed by a resource consumer. 
Its status tracks whether the resource has been allocated and what the resulting attributes are.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimSpec", + "description": "Spec describes the desired attributes of a resource that then needs to be allocated. It can only be set once when creating the ResourceClaim." + }, + "status": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimStatus", + "description": "Status describes whether the resource is available and with which attributes." + } + }, + "required": [ + "spec" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "resource.k8s.io", + "kind": "ResourceClaim", + "version": "v1alpha2" + } + ] + }, + "io.k8s.api.resource.v1alpha2.ResourceClaimConsumerReference": { + "description": "ResourceClaimConsumerReference contains enough information to let you locate the consumer of a ResourceClaim. The user must be a resource in the same namespace as the ResourceClaim.", + "properties": { + "apiGroup": { + "description": "APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources.", + "type": "string" + }, + "name": { + "description": "Name is the name of resource being referenced.", + "type": "string" + }, + "resource": { + "description": "Resource is the type of resource being referenced, for example \"pods\".", + "type": "string" + }, + "uid": { + "description": "UID identifies exactly one incarnation of the resource.", + "type": "string" + } + }, + "required": [ + "resource", + "name", + "uid" + ], + "type": "object" + }, + "io.k8s.api.resource.v1alpha2.ResourceClaimList": { + "description": "ResourceClaimList is a collection of claims.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is the list of resource claims.", + "items": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaim" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "resource.k8s.io", + "kind": "ResourceClaimList", + "version": "v1alpha2" + } + ] + }, + "io.k8s.api.resource.v1alpha2.ResourceClaimParametersReference": { + "description": "ResourceClaimParametersReference contains enough information to let you locate the parameters for a ResourceClaim. The object must be in the same namespace as the ResourceClaim.", + "properties": { + "apiGroup": { + "description": "APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources.", + "type": "string" + }, + "kind": { + "description": "Kind is the type of resource being referenced. This is the same value as in the parameter object's metadata, for example \"ConfigMap\".", + "type": "string" + }, + "name": { + "description": "Name is the name of resource being referenced.", + "type": "string" + } + }, + "required": [ + "kind", + "name" + ], + "type": "object" + }, + "io.k8s.api.resource.v1alpha2.ResourceClaimSchedulingStatus": { + "description": "ResourceClaimSchedulingStatus contains information about one particular ResourceClaim with \"WaitForFirstConsumer\" allocation mode.", + "properties": { + "name": { + "description": "Name matches the pod.spec.resourceClaims[*].Name field.", + "type": "string" + }, + "unsuitableNodes": { + "description": "UnsuitableNodes lists nodes that the ResourceClaim cannot be allocated for.\n\nThe size of this field is limited to 128, the same as for PodSchedulingSpec.PotentialNodes. This may get increased in the future, but not reduced.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + } + }, + "type": "object" + }, + "io.k8s.api.resource.v1alpha2.ResourceClaimSpec": { + "description": "ResourceClaimSpec defines how a resource is to be allocated.", + "properties": { + "allocationMode": { + "description": "Allocation can start immediately or when a Pod wants to use the resource. \"WaitForFirstConsumer\" is the default.", + "type": "string" + }, + "parametersRef": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimParametersReference", + "description": "ParametersRef references a separate object with arbitrary parameters that will be used by the driver when allocating a resource for the claim.\n\nThe object must be in the same namespace as the ResourceClaim." + }, + "resourceClassName": { + "description": "ResourceClassName references the driver and additional parameters via the name of a ResourceClass that was created as part of the driver deployment.", + "type": "string" + } + }, + "required": [ + "resourceClassName" + ], + "type": "object" + }, + "io.k8s.api.resource.v1alpha2.ResourceClaimStatus": { + "description": "ResourceClaimStatus tracks whether the resource has been allocated and what the resulting attributes are.", + "properties": { + "allocation": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.AllocationResult", + "description": "Allocation is set by the resource driver once a resource or set of resources has been allocated successfully. If this is not specified, the resources have not been allocated yet." 
+ }, + "deallocationRequested": { + "description": "DeallocationRequested indicates that a ResourceClaim is to be deallocated.\n\nThe driver then must deallocate this claim and reset the field together with clearing the Allocation field.\n\nWhile DeallocationRequested is set, no new consumers may be added to ReservedFor.", + "type": "boolean" + }, + "driverName": { + "description": "DriverName is a copy of the driver name from the ResourceClass at the time when allocation started.", + "type": "string" + }, + "reservedFor": { + "description": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.", + "items": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimConsumerReference" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "uid" + ], + "x-kubernetes-list-type": "map" + } + }, + "type": "object" + }, + "io.k8s.api.resource.v1alpha2.ResourceClaimTemplate": { + "description": "ResourceClaimTemplate is used to produce ResourceClaim objects.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimTemplateSpec", + "description": "Describes the ResourceClaim that is to be generated.\n\nThis field is immutable. A ResourceClaim will get created by the control plane for a Pod when needed and then not get updated anymore." + } + }, + "required": [ + "spec" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "resource.k8s.io", + "kind": "ResourceClaimTemplate", + "version": "v1alpha2" + } + ] + }, + "io.k8s.api.resource.v1alpha2.ResourceClaimTemplateList": { + "description": "ResourceClaimTemplateList is a collection of claim templates.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is the list of resource claim templates.", + "items": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "resource.k8s.io", + "kind": "ResourceClaimTemplateList", + "version": "v1alpha2" + } + ] + }, + "io.k8s.api.resource.v1alpha2.ResourceClaimTemplateSpec": { + "description": "ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.", + "properties": { + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "ObjectMeta may contain labels and annotations that will be copied into the ResourceClaim when creating it. No other fields are allowed and will be rejected during validation." + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimSpec", + "description": "Spec for the ResourceClaim. The entire content is copied unchanged into the ResourceClaim that gets created from this template. The same fields as in a ResourceClaim are also valid here." + } + }, + "required": [ + "spec" + ], + "type": "object" + }, + "io.k8s.api.resource.v1alpha2.ResourceClass": { + "description": "ResourceClass is used by administrators to influence how resources are allocated.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "driverName": { + "description": "DriverName defines the name of the dynamic resource driver that is used for allocation of a ResourceClaim that uses this class.\n\nResource drivers have a unique name in forward domain order (acme.example.com).", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object metadata" + }, + "parametersRef": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClassParametersReference", + "description": "ParametersRef references an arbitrary separate object that may hold parameters that will be used by the driver when allocating a resource that uses this class. A dynamic resource driver can distinguish between parameters stored here and those stored in ResourceClaimSpec." + }, + "suitableNodes": { + "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelector", + "description": "Only nodes matching the selector will be considered by the scheduler when trying to find a Node that fits a Pod when that Pod uses a ResourceClaim that has not been allocated yet.\n\nSetting this field is optional. If null, all nodes are candidates." 
+ } + }, + "required": [ + "driverName" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "resource.k8s.io", + "kind": "ResourceClass", + "version": "v1alpha2" + } + ] + }, + "io.k8s.api.resource.v1alpha2.ResourceClassList": { + "description": "ResourceClassList is a collection of classes.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is the list of resource classes.", + "items": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClass" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "resource.k8s.io", + "kind": "ResourceClassList", + "version": "v1alpha2" + } + ] + }, + "io.k8s.api.resource.v1alpha2.ResourceClassParametersReference": { + "description": "ResourceClassParametersReference contains enough information to let you locate the parameters for a ResourceClass.", + "properties": { + "apiGroup": { + "description": "APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources.", + "type": "string" + }, + "kind": { + "description": "Kind is the type of resource being referenced. This is the same value as in the parameter object's metadata.", + "type": "string" + }, + "name": { + "description": "Name is the name of resource being referenced.", + "type": "string" + }, + "namespace": { + "description": "Namespace that contains the referenced resource. Must be empty for cluster-scoped resources and non-empty for namespaced resources.", + "type": "string" + } + }, + "required": [ + "kind", + "name" + ], + "type": "object" + }, + "io.k8s.api.resource.v1alpha2.ResourceHandle": { + "description": "ResourceHandle holds opaque resource data for processing by a specific kubelet plugin.", + "properties": { + "data": { + "description": "Data contains the opaque data associated with this ResourceHandle. It is set by the controller component of the resource driver whose name matches the DriverName set in the ResourceClaimStatus this ResourceHandle is embedded in. It is set at allocation time and is intended for processing by the kubelet plugin whose name matches the DriverName set in this ResourceHandle.\n\nThe maximum size of this field is 16KiB. This may get increased in the future, but not reduced.", + "type": "string" + }, + "driverName": { + "description": "DriverName specifies the name of the resource driver whose kubelet plugin should be invoked to process this ResourceHandle's data once it lands on a node. 
This may differ from the DriverName set in the ResourceClaimStatus this ResourceHandle is embedded in.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.scheduling.v1.PriorityClass": { + "description": "PriorityClass defines a mapping from a priority class name to the priority integer value. The value can be any valid integer.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "description": { + "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", + "type": "string" + }, + "globalDefault": { + "description": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClass exists with its `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", + "type": "boolean" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "preemptionPolicy": { + "description": "preemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.", + "type": "string" + }, + "value": { + "description": "value represents the integer value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "format": "int32", + "type": "integer" + } + }, + "required": [ + "value" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "scheduling.k8s.io", + "kind": "PriorityClass", + "version": "v1" + } + ] + }, + "io.k8s.api.scheduling.v1.PriorityClassList": { + "description": "PriorityClassList is a collection of priority classes.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of PriorityClasses", + "items": { + "$ref": "#/definitions/io.k8s.api.scheduling.v1.PriorityClass" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "scheduling.k8s.io", + "kind": "PriorityClassList", + "version": "v1" + } + ] + }, + "io.k8s.api.storage.v1.CSIDriver": { + "description": "CSIDriver captures information about a Container Storage Interface (CSI) volume driver deployed on the cluster. Kubernetes attach detach controller uses this object to determine whether attach is required. Kubelet uses this object to determine whether pod information needs to be passed on mount. CSIDriver objects are non-namespaced.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object metadata. metadata.Name indicates the name of the CSI driver that this object refers to; it MUST be the same name returned by the CSI GetPluginName() call for that driver. The driver name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and alphanumerics between. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.storage.v1.CSIDriverSpec", + "description": "spec represents the specification of the CSI Driver." + } + }, + "required": [ + "spec" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "storage.k8s.io", + "kind": "CSIDriver", + "version": "v1" + } + ] + }, + "io.k8s.api.storage.v1.CSIDriverList": { + "description": "CSIDriverList is a collection of CSIDriver objects.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of CSIDriver", + "items": { + "$ref": "#/definitions/io.k8s.api.storage.v1.CSIDriver" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "storage.k8s.io", + "kind": "CSIDriverList", + "version": "v1" + } + ] + }, + "io.k8s.api.storage.v1.CSIDriverSpec": { + "description": "CSIDriverSpec is the specification of a CSIDriver.", + "properties": { + "attachRequired": { + "description": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\n\nThis field is immutable.", + "type": "boolean" + }, + "fsGroupPolicy": { + "description": "fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", + "type": "string" + }, + "podInfoOnMount": { + "description": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. 
As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", + "type": "boolean" + }, + "requiresRepublish": { + "description": "requiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.", + "type": "boolean" + }, + "seLinuxMount": { + "description": "seLinuxMount specifies if the CSI driver supports \"-o context\" mount option.\n\nWhen \"true\", the CSI driver must ensure that all volumes provided by this CSI driver can be mounted separately with different `-o context` options. This is typical for storage backends that provide volumes as filesystems on block devices or as independent shared volumes. Kubernetes will call NodeStage / NodePublish with \"-o context=xyz\" mount option when mounting a ReadWriteOncePod volume used in Pod that has explicitly set SELinux context. In the future, it may be expanded to other volume AccessModes. In any case, Kubernetes will ensure that the volume is mounted only with a single SELinux context.\n\nWhen \"false\", Kubernetes won't pass any special SELinux mount options to the driver. This is typical for volumes that represent subdirectories of a bigger shared filesystem.\n\nDefault is \"false\".", + "type": "boolean" + }, + "storageCapacity": { + "description": "storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information, if set to true.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes \u003c= 1.22 and now is mutable.", + "type": "boolean" + }, + "tokenRequests": { + "description": "tokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\u003caudience\u003e\": {\n \"token\": \u003ctoken\u003e,\n \"expirationTimestamp\": \u003cexpiration timestamp in RFC3339\u003e,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.", + "items": { + "$ref": "#/definitions/io.k8s.api.storage.v1.TokenRequest" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "volumeLifecycleModes": { + "description": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. 
The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.\n\nThe other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.\n\nFor more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is beta. This field is immutable.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-list-type": "set" + } + }, + "type": "object" + }, + "io.k8s.api.storage.v1.CSINode": { + "description": "CSINode holds information about all CSI drivers installed on a node. CSI drivers do not need to create the CSINode object directly. As long as they use the node-driver-registrar sidecar container, the kubelet will automatically populate the CSINode object for the CSI driver as part of kubelet plugin registration. CSINode has the same name as a node. If the object is missing, it means either there are no CSI Drivers available on the node, or the Kubelet version is low enough that it doesn't create this object. CSINode has an OwnerReference that points to the corresponding node object.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. metadata.name must be the Kubernetes node name." + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.storage.v1.CSINodeSpec", + "description": "spec is the specification of CSINode" + } + }, + "required": [ + "spec" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "storage.k8s.io", + "kind": "CSINode", + "version": "v1" + } + ] + }, + "io.k8s.api.storage.v1.CSINodeDriver": { + "description": "CSINodeDriver holds information about the specification of one CSI driver installed on a node", + "properties": { + "allocatable": { + "$ref": "#/definitions/io.k8s.api.storage.v1.VolumeNodeResources", + "description": "allocatable represents the volume resources of a node that are available for scheduling. This field is beta." + }, + "name": { + "description": "name represents the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.", + "type": "string" + }, + "nodeID": { + "description": "nodeID of the node from the driver point of view. This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. 
For example, Kubernetes may refer to a given node as \"node1\", but the storage system may refer to the same node as \"nodeA\". When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. \"nodeA\" instead of \"node1\". This field is required.", + "type": "string" + }, + "topologyKeys": { + "description": "topologyKeys is the list of keys supported by the driver. When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. \"company.com/zone\", \"company.com/region\"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. It is possible for different nodes to use different topology keys. This can be empty if driver does not support topology.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "name", + "nodeID" + ], + "type": "object" + }, + "io.k8s.api.storage.v1.CSINodeList": { + "description": "CSINodeList is a collection of CSINode objects.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of CSINode", + "items": { + "$ref": "#/definitions/io.k8s.api.storage.v1.CSINode" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "storage.k8s.io", + "kind": "CSINodeList", + "version": "v1" + } + ] + }, + "io.k8s.api.storage.v1.CSINodeSpec": { + "description": "CSINodeSpec holds information about the specification of all CSI drivers installed on a node", + "properties": { + "drivers": { + "description": "drivers is a list of information of all CSI Drivers existing on a node. If all drivers in the list are uninstalled, this can become empty.", + "items": { + "$ref": "#/definitions/io.k8s.api.storage.v1.CSINodeDriver" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + } + }, + "required": [ + "drivers" + ], + "type": "object" + }, + "io.k8s.api.storage.v1.CSIStorageCapacity": { + "description": "CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. 
This can be used when considering where to instantiate new PersistentVolumes.\n\nFor example this can express things like: - StorageClass \"standard\" has \"1234 GiB\" available in \"topology.kubernetes.io/zone=us-east1\" - StorageClass \"localssd\" has \"10 GiB\" available in \"kubernetes.io/hostname=knode-abc123\"\n\nThe following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero\n\nThe producer of these objects can decide which approach is more suitable.\n\nThey are consumed by the kube-scheduler when a CSI driver opts into capacity-aware scheduling with CSIDriverSpec.StorageCapacity. The scheduler compares the MaximumVolumeSize against the requested size of pending volumes to filter out unsuitable nodes. If MaximumVolumeSize is unset, it falls back to a comparison against the less precise Capacity. If that is also unset, the scheduler assumes that capacity is insufficient and tries some other node.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "capacity": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity", + "description": "capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable." + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "maximumVolumeSize": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity", + "description": "maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim." + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). 
To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-\u003cuuid\u003e, a generated name, or a reverse-domain name which ends with the unique CSI driver name.\n\nObjects are namespaced.\n\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "nodeTopology": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector", + "description": "nodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable." + }, + "storageClassName": { + "description": "storageClassName represents the name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.", + "type": "string" + } + }, + "required": [ + "storageClassName" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "storage.k8s.io", + "kind": "CSIStorageCapacity", + "version": "v1" + } + ] + }, + "io.k8s.api.storage.v1.CSIStorageCapacityList": { + "description": "CSIStorageCapacityList is a collection of CSIStorageCapacity objects.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of CSIStorageCapacity objects.", + "items": { + "$ref": "#/definitions/io.k8s.api.storage.v1.CSIStorageCapacity" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "storage.k8s.io", + "kind": "CSIStorageCapacityList", + "version": "v1" + } + ] + }, + "io.k8s.api.storage.v1.StorageClass": { + "description": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", + "properties": { + "allowVolumeExpansion": { + "description": "allowVolumeExpansion shows whether the storage class allows volume expansion.", + "type": "boolean" + }, + "allowedTopologies": { + "description": "allowedTopologies restricts the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications.
An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.TopologySelectorTerm" + }, + "type": "array", + "x-kubernetes-list-type": "atomic" + }, + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "mountOptions": { + "description": "mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.", + "items": { + "type": "string" + }, + "type": "array" + }, + "parameters": { + "additionalProperties": { + "type": "string" + }, + "description": "parameters holds the parameters for the provisioner that should create volumes of this storage class.", + "type": "object" + }, + "provisioner": { + "description": "provisioner indicates the type of the provisioner.", + "type": "string" + }, + "reclaimPolicy": { + "description": "reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. Defaults to Delete.", + "type": "string" + }, + "volumeBindingMode": { + "description": "volumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.", + "type": "string" + } + }, + "required": [ + "provisioner" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "storage.k8s.io", + "kind": "StorageClass", + "version": "v1" + } + ] + }, + "io.k8s.api.storage.v1.StorageClassList": { + "description": "StorageClassList is a collection of storage classes.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of StorageClasses", + "items": { + "$ref": "#/definitions/io.k8s.api.storage.v1.StorageClass" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "storage.k8s.io", + "kind": "StorageClassList", + "version": "v1" + } + ] + }, + "io.k8s.api.storage.v1.TokenRequest": { + "description": "TokenRequest contains parameters of a service account token.", + "properties": { + "audience": { + "description": "audience is the intended audience of the token in \"TokenRequestSpec\". It will default to the audiences of kube apiserver.", + "type": "string" + }, + "expirationSeconds": { + "description": "expirationSeconds is the duration of validity of the token in \"TokenRequestSpec\". It has the same default value of \"ExpirationSeconds\" in \"TokenRequestSpec\".", + "format": "int64", + "type": "integer" + } + }, + "required": [ + "audience" + ], + "type": "object" + }, + "io.k8s.api.storage.v1.VolumeAttachment": { + "description": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.storage.v1.VolumeAttachmentSpec", + "description": "spec represents specification of the desired attach/detach volume behavior. Populated by the Kubernetes system." + }, + "status": { + "$ref": "#/definitions/io.k8s.api.storage.v1.VolumeAttachmentStatus", + "description": "status represents status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher." + } + }, + "required": [ + "spec" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "storage.k8s.io", + "kind": "VolumeAttachment", + "version": "v1" + } + ] + }, + "io.k8s.api.storage.v1.VolumeAttachmentList": { + "description": "VolumeAttachmentList is a collection of VolumeAttachment objects.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of VolumeAttachments", + "items": { + "$ref": "#/definitions/io.k8s.api.storage.v1.VolumeAttachment" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "storage.k8s.io", + "kind": "VolumeAttachmentList", + "version": "v1" + } + ] + }, + "io.k8s.api.storage.v1.VolumeAttachmentSource": { + "description": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistentVolumes can be attached via an external attacher; in the future we may also allow inline volumes in pods. Exactly one member can be set.", + "properties": { + "inlineVolumeSpec": { + "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeSpec", + "description": "inlineVolumeSpec contains all the information necessary to attach a persistent volume defined by a pod's inline VolumeSource. This field is populated only for the CSIMigration feature. It contains translated fields from a pod's inline VolumeSource to a PersistentVolumeSpec. This field is beta-level and is only honored by servers that enabled the CSIMigration feature." + }, + "persistentVolumeName": { + "description": "persistentVolumeName represents the name of the persistent volume to attach.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.api.storage.v1.VolumeAttachmentSpec": { + "description": "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", + "properties": { + "attacher": { + "description": "attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", + "type": "string" + }, + "nodeName": { + "description": "nodeName represents the node that the volume should be attached to.", + "type": "string" + }, + "source": { + "$ref": "#/definitions/io.k8s.api.storage.v1.VolumeAttachmentSource", + "description": "source represents the volume that should be attached." + } + }, + "required": [ + "attacher", + "source", + "nodeName" + ], + "type": "object" + }, + "io.k8s.api.storage.v1.VolumeAttachmentStatus": { + "description": "VolumeAttachmentStatus is the status of a VolumeAttachment request.", + "properties": { + "attachError": { + "$ref": "#/definitions/io.k8s.api.storage.v1.VolumeError", + "description": "attachError represents the last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher." + }, + "attached": { + "description": "attached indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e.
the external-attacher.", + "type": "boolean" + }, + "attachmentMetadata": { + "additionalProperties": { + "type": "string" + }, + "description": "attachmentMetadata is populated with any information returned by the attach operation, upon successful attach, that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "type": "object" + }, + "detachError": { + "$ref": "#/definitions/io.k8s.api.storage.v1.VolumeError", + "description": "detachError represents the last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher." + } + }, + "required": [ + "attached" + ], + "type": "object" + }, + "io.k8s.api.storage.v1.VolumeError": { + "description": "VolumeError captures an error encountered during a volume operation.", + "properties": { + "message": { + "description": "message represents the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", + "type": "string" + }, + "time": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "time represents the time the error was encountered." + } + }, + "type": "object" + }, + "io.k8s.api.storage.v1.VolumeNodeResources": { + "description": "VolumeNodeResources is a set of resource limits for scheduling of volumes.", + "properties": { + "count": { + "description": "count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is not specified, then the supported number of volumes on this node is unbounded.", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + }, + "io.k8s.api.storage.v1alpha1.VolumeAttributesClass": { + "description": "VolumeAttributesClass represents a specification of mutable volume attributes defined by the CSI driver. The class can be specified during dynamic provisioning of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "driverName": { + "description": "Name of the CSI driver. This field is immutable.", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "parameters": { + "additionalProperties": { + "type": "string" + }, + "description": "parameters hold volume attributes defined by the CSI driver. These values are opaque to the Kubernetes and are passed directly to the CSI driver. The underlying storage provider supports changing these attributes on an existing volume, however the parameters field itself is immutable. To invoke a volume update, a new VolumeAttributesClass should be created with new parameters, and the PersistentVolumeClaim should be updated to reference the new VolumeAttributesClass.\n\nThis field is required and must contain at least one key/value pair. The keys cannot be empty, and the maximum number of parameters is 512, with a cumulative max size of 256K. If the CSI driver rejects invalid parameters, the target PersistentVolumeClaim will be set to an \"Infeasible\" state in the modifyVolumeStatus field.", + "type": "object" + } + }, + "required": [ + "driverName" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "storage.k8s.io", + "kind": "VolumeAttributesClass", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.storage.v1alpha1.VolumeAttributesClassList": { + "description": "VolumeAttributesClassList is a collection of VolumeAttributesClass objects.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of VolumeAttributesClass objects.", + "items": { + "$ref": "#/definitions/io.k8s.api.storage.v1alpha1.VolumeAttributesClass" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "storage.k8s.io", + "kind": "VolumeAttributesClassList", + "version": "v1alpha1" + } + ] + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceColumnDefinition": { + "description": "CustomResourceColumnDefinition specifies a column for server side printing.", + "properties": { + "description": { + "description": "description is a human readable description of this column.", + "type": "string" + }, + "format": { + "description": "format is an optional OpenAPI type definition for this column. The 'name' format is applied to the primary identifier column to assist in clients identifying column is the resource name. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details.", + "type": "string" + }, + "jsonPath": { + "description": "jsonPath is a simple JSON path (i.e. 
with array notation) which is evaluated against each custom resource to produce the value for this column.", + "type": "string" + }, + "name": { + "description": "name is a human readable name for the column.", + "type": "string" + }, + "priority": { + "description": "priority is an integer defining the relative importance of this column compared to others. Lower numbers are considered higher priority. Columns that may be omitted in limited space scenarios should be given a priority greater than 0.", + "format": "int32", + "type": "integer" + }, + "type": { + "description": "type is an OpenAPI type definition for this column. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details.", + "type": "string" + } + }, + "required": [ + "name", + "type", + "jsonPath" + ], + "type": "object" + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceConversion": { + "description": "CustomResourceConversion describes how to convert different versions of a CR.", + "properties": { + "strategy": { + "description": "strategy specifies how custom resources are converted between versions. Allowed values are: - `\"None\"`: The converter only changes the apiVersion and does not touch any other field in the custom resource. - `\"Webhook\"`: API Server will call an external webhook to do the conversion. Additional information\n is needed for this option. This requires spec.preserveUnknownFields to be false, and spec.conversion.webhook to be set.", + "type": "string" + }, + "webhook": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.WebhookConversion", + "description": "webhook describes how to call the conversion webhook. Required when `strategy` is set to `\"Webhook\"`." + } + }, + "required": [ + "strategy" + ], + "type": "object" + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinition": { + "description": "CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format \u003c.spec.name\u003e.\u003c.spec.group\u003e.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionSpec", + "description": "spec describes how the user wants the resources to appear" + }, + "status": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionStatus", + "description": "status indicates the actual state of the CustomResourceDefinition" + } + }, + "required": [ + "spec" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "apiextensions.k8s.io", + "kind": "CustomResourceDefinition", + "version": "v1" + } + ] + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionCondition": { + "description": "CustomResourceDefinitionCondition contains details for the current condition of this pod.", + "properties": { + "lastTransitionTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "lastTransitionTime last time the condition transitioned from one status to another." + }, + "message": { + "description": "message is a human-readable message indicating details about last transition.", + "type": "string" + }, + "reason": { + "description": "reason is a unique, one-word, CamelCase reason for the condition's last transition.", + "type": "string" + }, + "status": { + "description": "status is the status of the condition. Can be True, False, Unknown.", + "type": "string" + }, + "type": { + "description": "type is the type of the condition. Types include Established, NamesAccepted and Terminating.", + "type": "string" + } + }, + "required": [ + "type", + "status" + ], + "type": "object" + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionList": { + "description": "CustomResourceDefinitionList is a list of CustomResourceDefinition objects.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items list individual CustomResourceDefinition objects", + "items": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinition" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard object's metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "apiextensions.k8s.io", + "kind": "CustomResourceDefinitionList", + "version": "v1" + } + ] + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionNames": { + "description": "CustomResourceDefinitionNames indicates the names to serve this CustomResourceDefinition", + "properties": { + "categories": { + "description": "categories is a list of grouped resources this custom resource belongs to (e.g. 'all'). This is published in API discovery documents, and used by clients to support invocations like `kubectl get all`.", + "items": { + "type": "string" + }, + "type": "array" + }, + "kind": { + "description": "kind is the serialized kind of the resource. It is normally CamelCase and singular. Custom resource instances will use this value as the `kind` attribute in API calls.", + "type": "string" + }, + "listKind": { + "description": "listKind is the serialized kind of the list for this resource. Defaults to \"`kind`List\".", + "type": "string" + }, + "plural": { + "description": "plural is the plural name of the resource to serve. The custom resources are served under `/apis/\u003cgroup\u003e/\u003cversion\u003e/.../\u003cplural\u003e`. Must match the name of the CustomResourceDefinition (in the form `\u003cnames.plural\u003e.\u003cgroup\u003e`). Must be all lowercase.", + "type": "string" + }, + "shortNames": { + "description": "shortNames are short names for the resource, exposed in API discovery documents, and used by clients to support invocations like `kubectl get \u003cshortname\u003e`. It must be all lowercase.", + "items": { + "type": "string" + }, + "type": "array" + }, + "singular": { + "description": "singular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`.", + "type": "string" + } + }, + "required": [ + "plural", + "kind" + ], + "type": "object" + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionSpec": { + "description": "CustomResourceDefinitionSpec describes how a user wants their resource to appear", + "properties": { + "conversion": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceConversion", + "description": "conversion defines conversion settings for the CRD." + }, + "group": { + "description": "group is the API group of the defined custom resource. The custom resources are served under `/apis/\u003cgroup\u003e/...`. Must match the name of the CustomResourceDefinition (in the form `\u003cnames.plural\u003e.\u003cgroup\u003e`).", + "type": "string" + }, + "names": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionNames", + "description": "names specify the resource and kind names for the custom resource." + }, + "preserveUnknownFields": { + "description": "preserveUnknownFields indicates that object fields which are not specified in the OpenAPI schema should be preserved when persisting to storage. apiVersion, kind, metadata and known fields inside metadata are always preserved. 
This field is deprecated in favor of setting `x-preserve-unknown-fields` to true in `spec.versions[*].schema.openAPIV3Schema`. See https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#field-pruning for details.", + "type": "boolean" + }, + "scope": { + "description": "scope indicates whether the defined custom resource is cluster- or namespace-scoped. Allowed values are `Cluster` and `Namespaced`.", + "type": "string" + }, + "versions": { + "description": "versions is the list of all API versions of the defined custom resource. Version names are used to compute the order in which served versions are listed in API discovery. If the version string is \"kube-like\", it will sort above non \"kube-like\" version strings, which are ordered lexicographically. \"Kube-like\" versions start with a \"v\", then are followed by a number (the major version), then optionally the string \"alpha\" or \"beta\" and another number (the minor version). These are sorted first by GA \u003e beta \u003e alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major version, then minor version. An example sorted list of versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.", + "items": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionVersion" + }, + "type": "array" + } + }, + "required": [ + "group", + "names", + "scope", + "versions" + ], + "type": "object" + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionStatus": { + "description": "CustomResourceDefinitionStatus indicates the state of the CustomResourceDefinition", + "properties": { + "acceptedNames": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionNames", + "description": "acceptedNames are the names that are actually being used to serve discovery. They may be different than the names in spec." + }, + "conditions": { + "description": "conditions indicate state for particular aspects of a CustomResourceDefinition", + "items": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionCondition" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map" + }, + "storedVersions": { + "description": "storedVersions lists all versions of CustomResources that were ever persisted. Tracking these versions allows a migration path for stored versions in etcd. The field is mutable so a migration controller can finish a migration to another version (ensuring no old objects are left in storage), and then remove the rest of the versions from this list. Versions may not be removed from `spec.versions` while they exist in this list.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "type": "object" + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionVersion": { + "description": "CustomResourceDefinitionVersion describes a version for CRD.", + "properties": { + "additionalPrinterColumns": { + "description": "additionalPrinterColumns specifies additional columns returned in Table output. See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. 
If no columns are specified, a single column displaying the age of the custom resource is used.", + "items": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceColumnDefinition" + }, + "type": "array" + }, + "deprecated": { + "description": "deprecated indicates this version of the custom resource API is deprecated. When set to true, API requests to this version receive a warning header in the server response. Defaults to false.", + "type": "boolean" + }, + "deprecationWarning": { + "description": "deprecationWarning overrides the default warning returned to API clients. May only be set when `deprecated` is true. The default warning indicates this version is deprecated and recommends use of the newest served version of equal or greater stability, if one exists.", + "type": "string" + }, + "name": { + "description": "name is the version name, e.g. “v1”, “v2beta1”, etc. The custom resources are served under this version at `/apis/\u003cgroup\u003e/\u003cversion\u003e/...` if `served` is true.", + "type": "string" + }, + "schema": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceValidation", + "description": "schema describes the schema used for validation, pruning, and defaulting of this version of the custom resource." + }, + "served": { + "description": "served is a flag enabling/disabling this version from being served via REST APIs", + "type": "boolean" + }, + "storage": { + "description": "storage indicates this version should be used when persisting custom resources to storage. There must be exactly one version with storage=true.", + "type": "boolean" + }, + "subresources": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceSubresources", + "description": "subresources specify what subresources this version of the defined custom resource have." + } + }, + "required": [ + "name", + "served", + "storage" + ], + "type": "object" + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceSubresourceScale": { + "description": "CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources.", + "properties": { + "labelSelectorPath": { + "description": "labelSelectorPath defines the JSON path inside of a custom resource that corresponds to Scale `status.selector`. Only JSON paths without the array notation are allowed. Must be a JSON Path under `.status` or `.spec`. Must be set to work with HorizontalPodAutoscaler. The field pointed by this JSON path must be a string field (not a complex selector struct) which contains a serialized label selector in string form. More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource If there is no value under the given path in the custom resource, the `status.selector` value in the `/scale` subresource will default to the empty string.", + "type": "string" + }, + "specReplicasPath": { + "description": "specReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `spec.replicas`. Only JSON paths without the array notation are allowed. Must be a JSON Path under `.spec`. 
If there is no value under the given path in the custom resource, the `/scale` subresource will return an error on GET.", + "type": "string" + }, + "statusReplicasPath": { + "description": "statusReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `status.replicas`. Only JSON paths without the array notation are allowed. Must be a JSON Path under `.status`. If there is no value under the given path in the custom resource, the `status.replicas` value in the `/scale` subresource will default to 0.", + "type": "string" + } + }, + "required": [ + "specReplicasPath", + "statusReplicasPath" + ], + "type": "object" + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceSubresourceStatus": { + "description": "CustomResourceSubresourceStatus defines how to serve the status subresource for CustomResources. Status is represented by the `.status` JSON path inside of a CustomResource. When set, * exposes a /status subresource for the custom resource * PUT requests to the /status subresource take a custom resource object, and ignore changes to anything except the status stanza * PUT/POST/PATCH requests to the custom resource ignore changes to the status stanza", + "type": "object" + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceSubresources": { + "description": "CustomResourceSubresources defines the status and scale subresources for CustomResources.", + "properties": { + "scale": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceSubresourceScale", + "description": "scale indicates the custom resource should serve a `/scale` subresource that returns an `autoscaling/v1` Scale object." + }, + "status": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceSubresourceStatus", + "description": "status indicates the custom resource should serve a `/status` subresource. When enabled: 1. requests to the custom resource primary endpoint ignore changes to the `status` stanza of the object. 2. requests to the custom resource `/status` subresource ignore changes to anything other than the `status` stanza of the object." + } + }, + "type": "object" + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceValidation": { + "description": "CustomResourceValidation is a list of validation methods for CustomResources.", + "properties": { + "openAPIV3Schema": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps", + "description": "openAPIV3Schema is the OpenAPI v3 schema to use for validation and pruning." + } + }, + "type": "object" + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.ExternalDocumentation": { + "description": "ExternalDocumentation allows referencing an external resource for extended documentation.", + "properties": { + "description": { + "type": "string" + }, + "url": { + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSON": { + "description": "JSON represents any valid JSON value. These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil." 
+ }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps": { + "description": "JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema.org/).", + "properties": { + "$ref": { + "type": "string" + }, + "$schema": { + "type": "string" + }, + "additionalItems": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrBool" + }, + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrBool" + }, + "allOf": { + "items": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps" + }, + "type": "array" + }, + "anyOf": { + "items": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps" + }, + "type": "array" + }, + "default": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSON", + "description": "default is a default value for undefined object fields. Defaulting is a beta feature under the CustomResourceDefaulting feature gate. Defaulting requires spec.preserveUnknownFields to be false." + }, + "definitions": { + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps" + }, + "type": "object" + }, + "dependencies": { + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrStringArray" + }, + "type": "object" + }, + "description": { + "type": "string" + }, + "enum": { + "items": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSON" + }, + "type": "array" + }, + "example": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSON" + }, + "exclusiveMaximum": { + "type": "boolean" + }, + "exclusiveMinimum": { + "type": "boolean" + }, + "externalDocs": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.ExternalDocumentation" + }, + "format": { + "description": "format is an OpenAPI v3 format string. Unknown formats are ignored. The following formats are validated:\n\n- bsonobjectid: a bson object ID, i.e. a 24 characters hex string - uri: an URI as parsed by Golang net/url.ParseRequestURI - email: an email address as parsed by Golang net/mail.ParseAddress - hostname: a valid representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034]. 
- ipv4: an IPv4 IP as parsed by Golang net.ParseIP - ipv6: an IPv6 IP as parsed by Golang net.ParseIP - cidr: a CIDR as parsed by Golang net.ParseCIDR - mac: a MAC address as parsed by Golang net.ParseMAC - uuid: a UUID that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - uuid3: a UUID3 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?3[0-9a-f]{3}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - uuid4: a UUID4 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?4[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - uuid5: a UUID5 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?5[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - isbn: an ISBN10 or ISBN13 number string like \"0321751043\" or \"978-0321751041\" - isbn10: an ISBN10 number string like \"0321751043\" - isbn13: an ISBN13 number string like \"978-0321751041\" - creditcard: a credit card number defined by the regex ^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$ with any non-digit characters mixed in - ssn: a U.S. social security number following the regex ^\\d{3}[- ]?\\d{2}[- ]?\\d{4}$ - hexcolor: a hexadecimal color code like \"#FFFFFF\" following the regex ^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$ - rgbcolor: an RGB color code like \"rgb(255,255,255)\" - byte: base64 encoded binary data - password: any kind of string - date: a date string like \"2006-01-02\" as defined by full-date in RFC3339 - duration: a duration string like \"22 ns\" as parsed by Golang time.ParseDuration or compatible with Scala duration format - datetime: a date time string like \"2014-12-15T19:30:20.000Z\" as defined by date-time in RFC3339.", + "type": "string" + }, + "id": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrArray" + }, + "maxItems": { + "format": "int64", + "type": "integer" + }, + "maxLength": { + "format": "int64", + "type": "integer" + }, + "maxProperties": { + "format": "int64", + "type": "integer" + }, + "maximum": { + "format": "double", + "type": "number" + }, + "minItems": { + "format": "int64", + "type": "integer" + }, + "minLength": { + "format": "int64", + "type": "integer" + }, + "minProperties": { + "format": "int64", + "type": "integer" + }, + "minimum": { + "format": "double", + "type": "number" + }, + "multipleOf": { + "format": "double", + "type": "number" + }, + "not": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps" + }, + "nullable": { + "type": "boolean" + }, + "oneOf": { + "items": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps" + }, + "type": "array" + }, + "pattern": { + "type": "string" + }, + "patternProperties": { + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps" + }, + "type": "object" + }, + "properties": { + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps" + }, + "type": "object" + }, + "required": { + "items": { + "type": "string" + }, + "type": "array" + }, + "title": { + "type": "string" + }, + "type": { + "type": "string" + }, + "uniqueItems": { + "type": "boolean" + }, + "x-kubernetes-embedded-resource": { + "description":
"x-kubernetes-embedded-resource defines that the value is an embedded Kubernetes runtime.Object, with TypeMeta and ObjectMeta. The type must be object. It is allowed to further restrict the embedded object. kind, apiVersion and metadata are validated automatically. x-kubernetes-preserve-unknown-fields is allowed to be true, but does not have to be if the object is fully specified (up to kind, apiVersion, metadata).", + "type": "boolean" + }, + "x-kubernetes-int-or-string": { + "description": "x-kubernetes-int-or-string specifies that this value is either an integer or a string. If this is true, an empty type is allowed and type as child of anyOf is permitted if following one of the following patterns:\n\n1) anyOf:\n - type: integer\n - type: string\n2) allOf:\n - anyOf:\n - type: integer\n - type: string\n - ... zero or more", + "type": "boolean" + }, + "x-kubernetes-list-map-keys": { + "description": "x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used as the index of the map.\n\nThis tag MUST only be used on lists that have the \"x-kubernetes-list-type\" extension set to \"map\". Also, the values specified for this attribute must be a scalar typed field of the child structure (no nesting is supported).\n\nThe properties specified must either be required or have a default value, to ensure those properties are present for all list items.", + "items": { + "type": "string" + }, + "type": "array" + }, + "x-kubernetes-list-type": { + "description": "x-kubernetes-list-type annotates an array to further describe its topology. This extension must only be used on lists and may have 3 possible values:\n\n1) `atomic`: the list is treated as a single entity, like a scalar.\n Atomic lists will be entirely replaced when updated. This extension\n may be used on any type of list (struct, scalar, ...).\n2) `set`:\n Sets are lists that must not have multiple items with the same value. Each\n value must be a scalar, an object with x-kubernetes-map-type `atomic` or an\n array with x-kubernetes-list-type `atomic`.\n3) `map`:\n These lists are like maps in that their elements have a non-index key\n used to identify them. Order is preserved upon merge. The map tag\n must only be used on a list with elements of type object.\nDefaults to atomic for arrays.", + "type": "string" + }, + "x-kubernetes-map-type": { + "description": "x-kubernetes-map-type annotates an object to further describe its topology. This extension must only be used when type is object and may have 2 possible values:\n\n1) `granular`:\n These maps are actual maps (key-value pairs) and each fields are independent\n from each other (they can each be manipulated by separate actors). This is\n the default behaviour for all maps.\n2) `atomic`: the list is treated as a single entity, like a scalar.\n Atomic maps will be entirely replaced when updated.", + "type": "string" + }, + "x-kubernetes-preserve-unknown-fields": { + "description": "x-kubernetes-preserve-unknown-fields stops the API server decoding step from pruning fields which are not specified in the validation schema. This affects fields recursively, but switches back to normal pruning behaviour if nested properties or additionalProperties are specified in the schema. This can either be true or undefined. False is forbidden.", + "type": "boolean" + }, + "x-kubernetes-validations": { + "description": "x-kubernetes-validations describes a list of validation rules written in the CEL expression language. This field is an alpha-level. 
Using this field requires the feature gate `CustomResourceValidationExpressions` to be enabled.", + "items": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.ValidationRule" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "rule" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "rule", + "x-kubernetes-patch-strategy": "merge" + } + }, + "type": "object" + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrArray": { + "description": "JSONSchemaPropsOrArray represents a value that can either be a JSONSchemaProps or an array of JSONSchemaProps. Mainly here for serialization purposes." + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrBool": { + "description": "JSONSchemaPropsOrBool represents JSONSchemaProps or a boolean value. Defaults to true for the boolean property." + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrStringArray": { + "description": "JSONSchemaPropsOrStringArray represents a JSONSchemaProps or a string array." + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.ServiceReference": { + "description": "ServiceReference holds a reference to Service.legacy.k8s.io", + "properties": { + "name": { + "description": "name is the name of the service. Required", + "type": "string" + }, + "namespace": { + "description": "namespace is the namespace of the service. Required", + "type": "string" + }, + "path": { + "description": "path is an optional URL path at which the webhook will be contacted.", + "type": "string" + }, + "port": { + "description": "port is an optional service port at which the webhook will be contacted. `port` should be a valid port number (1-65535, inclusive). Defaults to 443 for backward compatibility.", + "format": "int32", + "type": "integer" + } + }, + "required": [ + "namespace", + "name" + ], + "type": "object" + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.ValidationRule": { + "description": "ValidationRule describes a validation rule written in the CEL expression language.", + "properties": { + "fieldPath": { + "description": "fieldPath represents the field path returned when the validation fails. It must be a relative JSON path (i.e. with array notation) scoped to the location of this x-kubernetes-validations extension in the schema and refer to an existing field. e.g. when the validation checks a specific attribute `foo` under a map `testMap`, the fieldPath could be set to `.testMap.foo`. If the validation checks that two lists must have unique attributes, the fieldPath could be set to either of the lists, e.g. `.testList`. It does not support numeric array indexes; it currently supports child operations that refer to an existing field. Refer to [JSONPath support in Kubernetes](https://kubernetes.io/docs/reference/kubectl/jsonpath/) for more info. For field names which contain special characters, use `['specialName']` to refer to the field name, e.g. for an attribute `foo.34$` that appears in a list `testList`, the fieldPath could be set to `.testList['foo.34$']`", + "type": "string" + }, + "message": { + "description": "Message represents the message displayed when validation fails. The message is required if the Rule contains line breaks. The message must not contain line breaks. If unset, the message is \"failed rule: {Rule}\". e.g.
\"must be a URL with the host matching spec.host\"", + "type": "string" + }, + "messageExpression": { + "description": "MessageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a rule, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. messageExpression has access to all the same variables as the rule; the only difference is the return type. Example: \"x must be less than max (\"+string(self.max)+\")\"", + "type": "string" + }, + "optionalOldSelf": { + "description": "optionalOldSelf is used to opt a transition rule into evaluation even when the object is first created, or if the old object is missing the value.\n\nWhen enabled `oldSelf` will be a CEL optional whose value will be `None` if there is no old value, or when the object is initially created.\n\nYou may check for presence of oldSelf using `oldSelf.hasValue()` and unwrap it after checking using `oldSelf.value()`. Check the CEL documentation for Optional types for more information: https://pkg.go.dev/github.com/google/cel-go/cel#OptionalTypes\n\nMay not be set unless `oldSelf` is used in `rule`.", + "type": "boolean" + }, + "reason": { + "description": "reason provides a machine-readable validation failure reason that is returned to the caller when a request fails this validation rule. The HTTP status code returned to the caller will match the reason of the reason of the first failed validation rule. The currently supported reasons are: \"FieldValueInvalid\", \"FieldValueForbidden\", \"FieldValueRequired\", \"FieldValueDuplicate\". If not set, default to use \"FieldValueInvalid\". All future added reasons must be accepted by clients when reading this value and unknown reasons should be treated as FieldValueInvalid.", + "type": "string" + }, + "rule": { + "description": "Rule represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec The Rule is scoped to the location of the x-kubernetes-validations extension in the schema. The `self` variable in the CEL expression is bound to the scoped value. Example: - Rule scoped to the root of a resource with a status subresource: {\"rule\": \"self.status.actual \u003c= self.spec.maxDesired\"}\n\nIf the Rule is scoped to an object with properties, the accessible properties of the object are field selectable via `self.field` and field presence can be checked via `has(self.field)`. Null valued fields are treated as absent fields in CEL expressions. If the Rule is scoped to an object with additionalProperties (i.e. a map) the value of the map are accessible via `self[mapKey]`, map containment can be checked via `mapKey in self` and all entries of the map are accessible via CEL macros and functions such as `self.all(...)`. 
If the Rule is scoped to an array, the elements of the array are accessible via `self[i]` and also by macros and functions. If the Rule is scoped to a scalar, `self` is bound to the scalar value. Examples: - Rule scoped to a map of objects: {\"rule\": \"self.components['Widget'].priority \u003c 10\"} - Rule scoped to a list of integers: {\"rule\": \"self.values.all(value, value \u003e= 0 \u0026\u0026 value \u003c 100)\"} - Rule scoped to a string value: {\"rule\": \"self.startsWith('kube')\"}\n\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object and from any x-kubernetes-embedded-resource annotated objects. No other metadata properties are accessible.\n\nUnknown data preserved in custom resources via x-kubernetes-preserve-unknown-fields is not accessible in CEL expressions. This includes: - Unknown field values that are preserved by object schemas with x-kubernetes-preserve-unknown-fields. - Object properties where the property schema is of an \"unknown type\". An \"unknown type\" is recursively defined as:\n - A schema with no type and x-kubernetes-preserve-unknown-fields set to true\n - An array where the items schema is of an \"unknown type\"\n - An object where the additionalProperties schema is of an \"unknown type\"\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:\n\t \"true\", \"false\", \"null\", \"in\", \"as\", \"break\", \"const\", \"continue\", \"else\", \"for\", \"function\", \"if\",\n\t \"import\", \"let\", \"loop\", \"package\", \"namespace\", \"return\".\nExamples:\n - Rule accessing a property named \"namespace\": {\"rule\": \"self.__namespace__ \u003e 0\"}\n - Rule accessing a property named \"x-prop\": {\"rule\": \"self.x__dash__prop \u003e 0\"}\n - Rule accessing a property named \"redact__d\": {\"rule\": \"self.redact__underscores__d \u003e 0\"}\n\nEquality on arrays with x-kubernetes-list-type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type uses the semantics of the list type:\n - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and\n non-intersecting elements in `Y` are appended, retaining their partial order.\n - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values\n are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with\n non-intersecting keys are appended, retaining their partial order.\n\nIf `rule` makes use of the `oldSelf` variable it is implicitly a `transition rule`.\n\nBy default, the `oldSelf` variable is the same type as `self`. When `optionalOldSelf` is true, the `oldSelf` variable is a CEL optional\n variable whose value() is the same type as `self`.\nSee the documentation for the `optionalOldSelf` field for details.\n\nTransition rules by default are applied only on UPDATE requests and are skipped if an old value could not be found.
You can opt a transition rule into unconditional evaluation by setting `optionalOldSelf` to true.", + "type": "string" + } + }, + "required": [ + "rule" + ], + "type": "object" + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.WebhookClientConfig": { + "description": "WebhookClientConfig contains the information to make a TLS connection with the webhook.", + "properties": { + "caBundle": { + "description": "caBundle is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.", + "format": "byte", + "type": "string" + }, + "service": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.ServiceReference", + "description": "service is a reference to the service for this webhook. Either service or url must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`." + }, + "url": { + "description": "url gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.WebhookConversion": { + "description": "WebhookConversion describes how to call a conversion webhook", + "properties": { + "clientConfig": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.WebhookClientConfig", + "description": "clientConfig is the instructions for how to call the webhook if strategy is `Webhook`." + }, + "conversionReviewVersions": { + "description": "conversionReviewVersions is an ordered list of preferred `ConversionReview` versions the Webhook expects. The API server will use the first version in the list which it supports. If none of the versions specified in this list are supported by API server, conversion will fail for the custom resource. If a persisted Webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "conversionReviewVersions" + ], + "type": "object" + }, + "io.k8s.apimachinery.pkg.api.resource.Quantity": { + "description": "Quantity is a fixed-point representation of a number. 
It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n\n\t(Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \"+\" | \"-\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will be rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", + "type": "string" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup": { + "description": "APIGroup contains the name, the supported versions, and the preferred version of a group.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "name": { + "description": "name is the name of the group.", + "type": "string" + }, + "preferredVersion": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery", + "description": "preferredVersion is the version preferred by the API server, which probably is the storage version." + }, + "serverAddressByClientCIDRs": { + "description": "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR" + }, + "type": "array" + }, + "versions": { + "description": "versions are the versions supported in this group.", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery" + }, + "type": "array" + } + }, + "required": [ + "name", + "versions" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "APIGroup", + "version": "v1" + } + ] + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.APIGroupList": { + "description": "APIGroupList is a list of APIGroup, to allow clients to discover the API at /apis.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "groups": { + "description": "groups is a list of APIGroup.", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + } + }, + "required": [ + "groups" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "APIGroupList", + "version": "v1" + } + ] + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.APIResource": { + "description": "APIResource specifies the name of a resource and whether it is namespaced.", + "properties": { + "categories": { + "description": "categories is a list of the grouped resources this resource belongs to (e.g. 'all')", + "items": { + "type": "string" + }, + "type": "array" + }, + "group": { + "description": "group is the preferred group of the resource. Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale.", + "type": "string" + }, + "kind": { + "description": "kind is the kind for the resource (e.g.
'Foo' is the kind for a resource 'foo')", + "type": "string" + }, + "name": { + "description": "name is the plural name of the resource.", + "type": "string" + }, + "namespaced": { + "description": "namespaced indicates whether a resource is namespaced.", + "type": "boolean" + }, + "shortNames": { + "description": "shortNames is a list of suggested short names of the resource.", + "items": { + "type": "string" + }, + "type": "array" + }, + "singularName": { + "description": "singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.", + "type": "string" + }, + "storageVersionHash": { + "description": "The hash value of the storage version, the version this resource is converted to when written to the data store. Value must be treated as opaque by clients. Only equality comparison on the value is valid. This is an alpha feature and may change or be removed in the future. The field is populated by the apiserver only if the StorageVersionHash feature gate is enabled. This field will remain optional even if it graduates.", + "type": "string" + }, + "verbs": { + "description": "verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)", + "items": { + "type": "string" + }, + "type": "array" + }, + "version": { + "description": "version is the preferred version of the resource. Empty implies the version of the containing resource list. For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group).", + "type": "string" + } + }, + "required": [ + "name", + "singularName", + "namespaced", + "kind", + "verbs" + ], + "type": "object" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList": { + "description": "APIResourceList is a list of APIResource; it is used to expose the name of the resources supported in a specific group and version, and whether the resource is namespaced.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "groupVersion": { + "description": "groupVersion is the group and version this APIResourceList is for.", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "resources": { + "description": "resources contains the name of the resources and whether they are namespaced.", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResource" + }, + "type": "array" + } + }, + "required": [ + "groupVersion", + "resources" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "APIResourceList", + "version": "v1" + } + ] + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.APIVersions": { + "description": "APIVersions lists the versions that are available, to allow clients to discover the API at /api, which is the root path of the legacy v1 API.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "serverAddressByClientCIDRs": { + "description": "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR" + }, + "type": "array" + }, + "versions": { + "description": "versions are the api versions that are available.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "versions", + "serverAddressByClientCIDRs" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "APIVersions", + "version": "v1" + } + ] + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.Condition": { + "description": "Condition contains details for one aspect of the current state of this API Resource.", + "properties": { + "lastTransitionTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable." + }, + "message": { + "description": "message is a human readable message indicating details about the transition. This may be an empty string.", + "type": "string" + }, + "observedGeneration": { + "description": "observedGeneration represents the .metadata.generation that the condition was set based upon.
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance.", + "format": "int64", + "type": "integer" + }, + "reason": { + "description": "reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty.", + "type": "string" + }, + "status": { + "description": "status of the condition, one of True, False, Unknown.", + "type": "string" + }, + "type": { + "description": "type of condition in CamelCase or in foo.example.com/CamelCase.", + "type": "string" + } + }, + "required": [ + "type", + "status", + "lastTransitionTime", + "reason", + "message" + ], + "type": "object" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions": { + "description": "DeleteOptions may be provided when deleting an API object.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "dryRun": { + "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "items": { + "type": "string" + }, + "type": "array" + }, + "gracePeriodSeconds": { + "description": "The duration in seconds before the object should be deleted. Value must be a non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per-object value if not specified.", + "format": "int64", + "type": "integer" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "orphanDependents": { + "description": "Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", + "type": "boolean" + }, + "preconditions": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Preconditions", + "description": "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned." + }, + "propagationPolicy": { + "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.
Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", + "type": "string" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "admission.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "admission.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "admissionregistration.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "admissionregistration.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, + { + "group": "admissionregistration.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "apiextensions.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "apiextensions.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "apiregistration.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "apiregistration.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "apps", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "apps", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "apps", + "kind": "DeleteOptions", + "version": "v1beta2" + }, + { + "group": "authentication.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "authentication.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, + { + "group": "authentication.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "authorization.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "authorization.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "autoscaling", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "autoscaling", + "kind": "DeleteOptions", + "version": "v2" + }, + { + "group": "autoscaling", + "kind": "DeleteOptions", + "version": "v2beta1" + }, + { + "group": "autoscaling", + "kind": "DeleteOptions", + "version": "v2beta2" + }, + { + "group": "batch", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "batch", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "certificates.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "certificates.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, + { + "group": "certificates.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "coordination.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "coordination.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "discovery.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "discovery.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "events.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "events.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "extensions", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": 
"DeleteOptions", + "version": "v1beta2" + }, + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta3" + }, + { + "group": "imagepolicy.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, + { + "group": "internal.apiserver.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, + { + "group": "networking.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "networking.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, + { + "group": "networking.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "node.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "node.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, + { + "group": "node.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "policy", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "policy", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "rbac.authorization.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "rbac.authorization.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, + { + "group": "rbac.authorization.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "resource.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha2" + }, + { + "group": "scheduling.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "scheduling.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, + { + "group": "scheduling.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "storage.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "storage.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, + { + "group": "storage.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + } + ] + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1": { + "description": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:\u003cname\u003e', where \u003cname\u003e is the name of a field in a struct, or key in a map 'v:\u003cvalue\u003e', where \u003cvalue\u003e is the exact json formatted value of a list item 'i:\u003cindex\u003e', where \u003cindex\u003e is position of a item in a list 'k:\u003ckeys\u003e', where \u003ckeys\u003e is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff", + "type": "object" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery": { + "description": "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensibility.", + "properties": { + "groupVersion": { + "description": "groupVersion specifies the API group and version in the form \"group/version\"", + "type": "string" + }, + "version": { + "description": "version specifies the version in the form of \"version\". 
This is to save the clients the trouble of splitting the GroupVersion.", + "type": "string" + } + }, + "required": [ + "groupVersion", + "version" + ], + "type": "object" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector": { + "description": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", + "properties": { + "matchExpressions": { + "description": "matchExpressions is a list of label selector requirements. The requirements are ANDed.", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement" + }, + "type": "array" + }, + "matchLabels": { + "additionalProperties": { + "type": "string" + }, + "description": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.", + "type": "object" + } + }, + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement": { + "description": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", + "properties": { + "key": { + "description": "key is the label key that the selector applies to.", + "type": "string" + }, + "operator": { + "description": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.", + "type": "string" + }, + "values": { + "description": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", + "items": { + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta": { + "description": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.", + "properties": { + "continue": { + "description": "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.", + "type": "string" + }, + "remainingItemCount": { + "description": "remainingItemCount is the number of subsequent items in the list which are not included in this list response. If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. 
Servers older than v1.15 do not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection. Clients should not rely on the remainingItemCount to be set or to be exact.", + "format": "int64", + "type": "integer" + }, + "resourceVersion": { + "description": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", + "type": "string" + }, + "selfLink": { + "description": "Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry": { + "description": "ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the version of this resource that this field set applies to. The format is \"group/version\" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.", + "type": "string" + }, + "fieldsType": { + "description": "FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: \"FieldsV1\"", + "type": "string" + }, + "fieldsV1": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1", + "description": "FieldsV1 holds the first JSON version format as described in the \"FieldsV1\" type." + }, + "manager": { + "description": "Manager is an identifier of the workflow managing these fields.", + "type": "string" + }, + "operation": { + "description": "Operation is the type of operation which led to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.", + "type": "string" + }, + "subresource": { + "description": "Subresource is the name of the subresource used to update that object, or empty string if the object was updated through the main resource. The value of this field is used to distinguish between managers, even if they share the same name. For example, a status update will be distinct from a regular update using the same manager name. Note that the APIVersion field is not related to the Subresource field and it always corresponds to the version of the main resource.", + "type": "string" + }, + "time": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "Time is the timestamp of when the ManagedFields entry was added. The timestamp will also be updated if a field is added, the manager changes the value of any of the owned fields, or removes a field. The timestamp does not update when a field is removed from the entry because another manager took it over."
+ } + }, + "type": "object" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime": { + "description": "MicroTime is a version of Time with microsecond-level precision.", + "format": "date-time", + "type": "string" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta": { + "description": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.", + "properties": { + "annotations": { + "additionalProperties": { + "type": "string" + }, + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", + "type": "object" + }, + "creationTimestamp": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "deletionGracePeriodSeconds": { + "description": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.", + "format": "int64", + "type": "integer" + }, + "deletionTimestamp": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "finalizers": { + "description": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order.
Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.", + "items": { + "type": "string" + }, + "type": "array", + "x-kubernetes-patch-strategy": "merge" + }, + "generateName": { + "description": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will return a 409.\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency", + "type": "string" + }, + "generation": { + "description": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.", + "format": "int64", + "type": "integer" + }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", + "type": "object" + }, + "managedFields": { + "description": "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry" + }, + "type": "array" + }, + "name": { + "description": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names", + "type": "string" + }, + "namespace": { + "description": "Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces", + "type": "string" + }, + "ownerReferences": { + "description": "List of objects depended on by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference" + }, + "type": "array", + "x-kubernetes-patch-merge-key": "uid", + "x-kubernetes-patch-strategy": "merge" + }, + "resourceVersion": { + "description": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and pass them unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", + "type": "string" + }, + "selfLink": { + "description": "Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.", + "type": "string" + }, + "uid": { + "description": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference": { + "description": "OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.", + "properties": { + "apiVersion": { + "description": "API version of the referent.", + "type": "string" + }, + "blockOwnerDeletion": { + "description": "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.", + "type": "boolean" + }, + "controller": { + "description": "If true, this reference points to the managing controller.", + "type": "boolean" + }, + "kind": { + "description": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names", + "type": "string" + }, + "uid": { + "description": "UID of the referent.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids", + "type": "string" + } + }, + "required": [ + "apiVersion", + "kind", + "name", + "uid" + ], + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.Patch": { + "description": "Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.", + "type": "object" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.Preconditions": { + "description": "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.", + "properties": { + "resourceVersion": { + "description": "Specifies the target ResourceVersion", + "type": "string" + }, + "uid": { + "description": "Specifies the target UID.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR": { + "description": "ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match.", + "properties": { + "clientCIDR": { + "description": "The CIDR with which clients can match their IP to figure out the server address that they should use.", + "type": "string" + }, + "serverAddress": { + "description": "Address of this server, suitable for a client that matches the above CIDR. This can be a hostname, hostname:port, IP or IP:port.", + "type": "string" + } + }, + "required": [ + "clientCIDR", + "serverAddress" + ], + "type": "object" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.Status": { + "description": "Status is a return value for calls that don't return other objects.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "code": { + "description": "Suggested HTTP return code for this status, 0 if not set.", + "format": "int32", + "type": "integer" + }, + "details": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.StatusDetails", + "description": "Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type." + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "message": { + "description": "A human-readable description of the status of this operation.", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + }, + "reason": { + "description": "A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.", + "type": "string" + }, + "status": { + "description": "Status of the operation. 
One of: \"Success\" or \"Failure\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "type": "string" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "Status", + "version": "v1" + }, + { + "group": "resource.k8s.io", + "kind": "Status", + "version": "v1alpha2" + } + ] + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.StatusCause": { + "description": "StatusCause provides more information about an api.Status failure, including cases when multiple errors are encountered.", + "properties": { + "field": { + "description": "The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.\n\nExamples:\n \"name\" - the field \"name\" on the current resource\n \"items[0].name\" - the field \"name\" on the first array entry in \"items\"", + "type": "string" + }, + "message": { + "description": "A human-readable description of the cause of the error. This field may be presented as-is to a reader.", + "type": "string" + }, + "reason": { + "description": "A machine-readable description of the cause of the error. If this value is empty there is no information available.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.StatusDetails": { + "description": "StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.", + "properties": { + "causes": { + "description": "The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.StatusCause" + }, + "type": "array" + }, + "group": { + "description": "The group attribute of the resource associated with the status StatusReason.", + "type": "string" + }, + "kind": { + "description": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "name": { + "description": "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).", + "type": "string" + }, + "retryAfterSeconds": { + "description": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.", + "format": "int32", + "type": "integer" + }, + "uid": { + "description": "UID of the resource. (when there is a single resource which can be described). More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.Time": { + "description": "Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers.", + "format": "date-time", + "type": "string" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent": { + "description": "Event represents a single event to a watched resource.", + "properties": { + "object": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.runtime.RawExtension", + "description": "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context." + }, + "type": { + "type": "string" + } + }, + "required": [ + "type", + "object" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "admission.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "admission.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "admissionregistration.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "admissionregistration.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, + { + "group": "admissionregistration.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "apiextensions.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "apiextensions.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "apiregistration.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "apiregistration.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "apps", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "apps", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "apps", + "kind": "WatchEvent", + "version": "v1beta2" + }, + { + "group": "authentication.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "authentication.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, + { + "group": "authentication.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "authorization.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "authorization.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "autoscaling", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "autoscaling", + "kind": "WatchEvent", + "version": "v2" + }, + { + "group": "autoscaling", + "kind": "WatchEvent", + "version": "v2beta1" + }, + { + "group": "autoscaling", + "kind": "WatchEvent", + "version": "v2beta2" + }, + { + "group": "batch", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "batch", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "certificates.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "certificates.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, + { + "group": "certificates.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "coordination.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "coordination.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "discovery.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "discovery.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "events.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "events.k8s.io", + "kind": "WatchEvent", + 
"version": "v1beta1" + }, + { + "group": "extensions", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "WatchEvent", + "version": "v1beta2" + }, + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "WatchEvent", + "version": "v1beta3" + }, + { + "group": "imagepolicy.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, + { + "group": "internal.apiserver.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, + { + "group": "networking.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "networking.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, + { + "group": "networking.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "node.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "node.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, + { + "group": "node.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "policy", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "policy", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "rbac.authorization.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "rbac.authorization.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, + { + "group": "rbac.authorization.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "resource.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha2" + }, + { + "group": "scheduling.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "scheduling.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, + { + "group": "scheduling.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "storage.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "storage.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, + { + "group": "storage.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + } + ] + }, + "io.k8s.apimachinery.pkg.runtime.RawExtension": { + "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. 
The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", + "type": "object" + }, + "io.k8s.apimachinery.pkg.util.intstr.IntOrString": { + "description": "IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.", + "format": "int-or-string", + "type": [ + "integer", + "string" + ] + }, + "io.k8s.apimachinery.pkg.version.Info": { + "description": "Info contains versioning information.", + "properties": { + "buildDate": { + "type": "string" + }, + "compiler": { + "type": "string" + }, + "gitCommit": { + "type": "string" + }, + "gitTreeState": { + "type": "string" + }, + "gitVersion": { + "type": "string" + }, + "goVersion": { + "type": "string" + }, + "major": { + "type": "string" + }, + "minor": { + "type": "string" + }, + "platform": { + "type": "string" + } + }, + "required": [ + "major", + "minor", + "gitVersion", + "gitCommit", + "gitTreeState", + "buildDate", + "goVersion", + "compiler", + "platform" + ], + "type": "object" + }, + "io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIService": { + "description": "APIService represents a server for a particular GroupVersion. Name must be \"version.group\".", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceSpec", + "description": "Spec contains information for locating and communicating with a server" + }, + "status": { + "$ref": "#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceStatus", + "description": "Status contains derived information about an API server" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "apiregistration.k8s.io", + "kind": "APIService", + "version": "v1" + } + ] + }, + "io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceCondition": { + "description": "APIServiceCondition describes the state of an APIService at a particular point", + "properties": { + "lastTransitionTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time", + "description": "Last time the condition transitioned from one status to another."
+ }, + "message": { + "description": "Human-readable message indicating details about last transition.", + "type": "string" + }, + "reason": { + "description": "Unique, one-word, CamelCase reason for the condition's last transition.", + "type": "string" + }, + "status": { + "description": "Status is the status of the condition. Can be True, False, Unknown.", + "type": "string" + }, + "type": { + "description": "Type is the type of the condition.", + "type": "string" + } + }, + "required": [ + "type", + "status" + ], + "type": "object" + }, + "io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceList": { + "description": "APIServiceList is a list of APIService objects.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is the list of APIService", + "items": { + "$ref": "#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIService" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "apiregistration.k8s.io", + "kind": "APIServiceList", + "version": "v1" + } + ] + }, + "io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceSpec": { + "description": "APIServiceSpec contains information for locating and communicating with a server. Only https is supported, though you are able to disable certificate verification.", + "properties": { + "caBundle": { + "description": "CABundle is a PEM encoded CA bundle which will be used to validate an API server's serving certificate. If unspecified, system trust roots on the apiserver are used.", + "format": "byte", + "type": "string", + "x-kubernetes-list-type": "atomic" + }, + "group": { + "description": "Group is the API group name this server hosts", + "type": "string" + }, + "groupPriorityMinimum": { + "description": "GroupPriorityMinimum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMinimum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. 
(v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000, and PaaSes (OpenShift, Deis) in the 2000s", + "format": "int32", + "type": "integer" + }, + "insecureSkipTLSVerify": { + "description": "InsecureSkipTLSVerify disables TLS certificate verification when communicating with this server. This is strongly discouraged. You should use the CABundle instead.", + "type": "boolean" + }, + "service": { + "$ref": "#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.ServiceReference", + "description": "Service is a reference to the service for this API server. It must communicate on port 443. If the Service is nil, that means the handling for the API groupversion is handled locally on this server. The call will simply delegate to the normal handler chain to be fulfilled." + }, + "version": { + "description": "Version is the API version this server hosts. For example, \"v1\"", + "type": "string" + }, + "versionPriority": { + "description": "VersionPriority controls the ordering of this API version inside of its group. Must be greater than zero. The primary sort is based on VersionPriority, ordered highest to lowest (20 before 10). Since it's inside of a group, the number can be small, probably in the 10s. In case of equal version priorities, the version string will be used to compute the order inside a group. If the version string is \"kube-like\", it will sort above non \"kube-like\" version strings, which are ordered lexicographically. \"Kube-like\" versions start with a \"v\", then are followed by a number (the major version), then optionally the string \"alpha\" or \"beta\" and another number (the minor version). These are sorted first by GA \u003e beta \u003e alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major version, then minor version. An example sorted list of versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.", + "format": "int32", + "type": "integer" + } + }, + "required": [ + "groupPriorityMinimum", + "versionPriority" + ], + "type": "object" + }, + "io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceStatus": { + "description": "APIServiceStatus contains derived information about an API server", + "properties": { + "conditions": { + "description": "Current service state of apiService.", + "items": { + "$ref": "#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceCondition" + }, + "type": "array", + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + } + }, + "type": "object" + }, + "io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.ServiceReference": { + "description": "ServiceReference holds a reference to Service.legacy.k8s.io", + "properties": { + "name": { + "description": "Name is the name of the service", + "type": "string" + }, + "namespace": { + "description": "Namespace is the namespace of the service", + "type": "string" + }, + "port": { + "description": "If specified, the port on the service that is hosting the webhook. Defaults to 443 for backward compatibility.
`port` should be a valid port number (1-65535, inclusive).", + "format": "int32", + "type": "integer" + } + }, + "type": "object" + } + }, + "oneOf": [ + { + "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.EventBus" + }, + { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSource" + }, + { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.Sensor" + } + ], + "type": "object" +} diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index b18993ba77..fdfb33903e 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -2,7 +2,7 @@ "swagger": "2.0", "info": { "title": "Argo Events", - "version": "v1.4.0" + "version": "v1.9.2-cap-CR-24607" }, "paths": {}, "definitions": { @@ -119,6 +119,9 @@ "bucket": { "$ref": "#/definitions/io.argoproj.common.S3Bucket" }, + "caCertificate": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + }, "endpoint": { "type": "string" }, @@ -197,6 +200,29 @@ } } }, + "io.argoproj.common.SchemaRegistryConfig": { + "description": "SchemaRegistryConfig refers to configuration for a client", + "type": "object", + "required": [ + "url", + "schemaId" + ], + "properties": { + "auth": { + "description": "SchemaRegistry - basic authentication", + "$ref": "#/definitions/io.argoproj.common.BasicAuth" + }, + "schemaId": { + "description": "Schema ID", + "type": "integer", + "format": "int32" + }, + "url": { + "description": "Schema Registry URL.", + "type": "string" + } + } + }, "io.argoproj.common.SecureHeader": { "description": "SecureHeader refers to HTTP Headers with auth tokens as values", "type": "object", @@ -229,29 +255,21 @@ "description": "TLSConfig refers to TLS configuration for a client.", "type": "object", "properties": { - "caCertPath": { - "description": "DeprecatedCACertPath refers the file path that contains the CA cert. Deprecated: will be removed in v1.5, use CACertSecret instead", - "type": "string" - }, "caCertSecret": { "description": "CACertSecret refers to the secret that contains the CA cert", "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" }, - "clientCertPath": { - "description": "DeprecatedClientCertPath refers the file path that contains client cert. Deprecated: will be removed in v1.5, use ClientCertSecret instead", - "type": "string" - }, "clientCertSecret": { "description": "ClientCertSecret refers to the secret that contains the client cert", "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" }, - "clientKeyPath": { - "description": "DeprecatedClientKeyPath refers the file path that contains client key. Deprecated: will be removed in v1.5, use ClientKeySecret instead", - "type": "string" - }, "clientKeySecret": { "description": "ClientKeySecret refers to the secret that contains the client key", "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + }, + "insecureSkipVerify": { + "description": "If true, skips creation of TLSConfig with certs and creates an empty TLSConfig. 
(Defaults to false)", + "type": "boolean" + } } }, @@ -271,6 +289,12 @@ "description": "BusConfig has the finalized configuration for EventBus", "type": "object", "properties": { + "jetstream": { + "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.JetStreamConfig" + }, + "kafka": { + "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.KafkaBus" + }, "nats": { "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.NATSConfig" } @@ -280,8 +304,14 @@ "description": "ContainerTemplate defines customized spec for a container", "type": "object", "properties": { + "imagePullPolicy": { + "type": "string" + }, "resources": { "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements" + }, + "securityContext": { + "$ref": "#/definitions/io.k8s.api.core.v1.SecurityContext" + } } } }, @@ -343,6 +373,17 @@ "description": "EventBusSpec refers to specification of eventbus resource", "type": "object", "properties": { + "jetstream": { + "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.JetStreamBus" + }, + "jetstreamExotic": { + "description": "Exotic JetStream", + "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.JetStreamConfig" + }, + "kafka": { + "description": "Kafka eventbus", + "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.KafkaBus" + }, "nats": { "description": "NATS eventbus", "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.NATSBus" @@ -368,6 +409,166 @@ } } }, + "io.argoproj.eventbus.v1alpha1.JetStreamBus": { + "description": "JetStreamBus holds the JetStream EventBus information", + "type": "object", + "properties": { + "affinity": { + "description": "The pod's scheduling constraints More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/", + "$ref": "#/definitions/io.k8s.api.core.v1.Affinity" + }, + "containerTemplate": { + "description": "ContainerTemplate contains customized spec for the NATS JetStream container", + "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.ContainerTemplate" + }, + "imagePullSecrets": { + "description": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" + }, + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "maxPayload": { + "description": "Maximum number of bytes in a message payload, 0 means unlimited. Defaults to 1MB", + "type": "string" + }, + "metadata": { + "description": "Metadata sets the pod's metadata, i.e. annotations and labels", + "$ref": "#/definitions/io.argoproj.common.Metadata" + }, + "metricsContainerTemplate": { + "description": "MetricsContainerTemplate contains customized spec for metrics container", + "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.ContainerTemplate" + }, + "nodeSelector": { + "description": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node.
More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "persistence": { + "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.PersistenceStrategy" + }, + "priority": { + "description": "The priority value. Various system components use this field to find the priority of the JetStream pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/", + "type": "integer", + "format": "int32" + }, + "priorityClassName": { + "description": "If specified, indicates the JetStream pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/", + "type": "string" + }, + "reloaderContainerTemplate": { + "description": "ReloaderContainerTemplate contains customized spec for config reloader container", + "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.ContainerTemplate" + }, + "replicas": { + "description": "JetStream StatefulSet size", + "type": "integer", + "format": "int32" + }, + "securityContext": { + "description": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.", + "$ref": "#/definitions/io.k8s.api.core.v1.PodSecurityContext" + }, + "serviceAccountName": { + "description": "ServiceAccountName to apply to the StatefulSet", + "type": "string" + }, + "settings": { + "description": "JetStream configuration, if not specified, global settings in controller-config will be used. See https://docs.nats.io/running-a-nats-service/configuration#jetstream. Only configure \"max_memory_store\" or \"max_file_store\", do not set \"store_dir\" as it has been hardcoded.", + "type": "string" + }, + "startArgs": { + "description": "Optional arguments to start nats-server. For example, \"-D\" to enable debugging output, \"-DV\" to enable debugging and tracing. Check https://docs.nats.io/ for all the available arguments.", + "type": "array", + "items": { + "type": "string" + } + }, + "streamConfig": { + "description": "Optional configuration for the streams to be created in this JetStream service, if specified, it will be merged with the default configuration in controller-config. It accepts a YAML format configuration, available fields include, \"maxBytes\", \"maxMsgs\", \"maxAge\" (e.g. 72h), \"replicas\" (1, 3, 5), \"duplicates\" (e.g.
5m).", + "type": "string" + }, + "tolerations": { + "description": "If specified, the pod's tolerations.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Toleration" + } + }, + "version": { + "description": "JetStream version, such as \"2.7.3\"", + "type": "string" + } + } + }, + "io.argoproj.eventbus.v1alpha1.JetStreamConfig": { + "type": "object", + "properties": { + "accessSecret": { + "description": "Secret for auth", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + }, + "streamConfig": { + "type": "string" + }, + "url": { + "description": "JetStream (Nats) URL", + "type": "string" + } + } + }, + "io.argoproj.eventbus.v1alpha1.KafkaBus": { + "description": "KafkaBus holds the KafkaBus EventBus information", + "type": "object", + "properties": { + "consumerGroup": { + "description": "Consumer group for kafka client", + "$ref": "#/definitions/io.argoproj.eventbus.v1alpha1.KafkaConsumerGroup" + }, + "sasl": { + "description": "SASL configuration for the kafka client", + "$ref": "#/definitions/io.argoproj.common.SASLConfig" + }, + "tls": { + "description": "TLS configuration for the kafka client.", + "$ref": "#/definitions/io.argoproj.common.TLSConfig" + }, + "topic": { + "description": "Topic name, defaults to {namespace_name}-{eventbus_name}", + "type": "string" + }, + "url": { + "description": "URL to kafka cluster, multiple URLs separated by comma", + "type": "string" + }, + "version": { + "description": "Kafka version, sarama defaults to the oldest supported stable version", + "type": "string" + } + } + }, + "io.argoproj.eventbus.v1alpha1.KafkaConsumerGroup": { + "type": "object", + "properties": { + "groupName": { + "description": "Consumer group name, defaults to {namespace_name}-{sensor_name}", + "type": "string" + }, + "rebalanceStrategy": { + "description": "Rebalance strategy can be one of: sticky, roundrobin, range. Range is the default.", + "type": "string" + }, + "startOldest": { + "description": "When starting up a new group do we want to start from the oldest event (true) or the newest event (false), defaults to false", + "type": "boolean" + } + } + }, "io.argoproj.eventbus.v1alpha1.NATSBus": { "description": "NATSBus holds the NATS eventbus information", "type": "object", @@ -412,10 +613,6 @@ "description": "The pod's scheduling constraints More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/", "$ref": "#/definitions/io.k8s.api.core.v1.Affinity" }, - "antiAffinity": { - "description": "Deprecated, use Affinity instead, will be removed in v1.5", - "type": "boolean" - }, "auth": { "type": "string" }, @@ -445,6 +642,15 @@ "type": "integer", "format": "int64" }, + "maxPayload": { + "description": "Maximum number of bytes in a message payload, 0 means unlimited. Defaults to 1MB", + "type": "string" + }, + "maxSubs": { + "description": "Maximum number of subscriptions per channel, 0 means unlimited. Defaults to 1000", + "type": "integer", + "format": "int64" + }, "metadata": { "description": "Metadata sets the pods's metadata, i.e. annotations and labels", "$ref": "#/definitions/io.argoproj.common.Metadata" @@ -472,6 +678,22 @@ "description": "If specified, indicates the EventSource pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. 
If not specified, the pod priority will be default or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/", "type": "string" }, + "raftCommitTimeout": { + "description": "Specifies the time without an Apply() operation before sending a heartbeat to ensure timely commit, e.g. \"72h\", \"4h35m\". Defaults to 100ms", + "type": "string" + }, + "raftElectionTimeout": { + "description": "Specifies the time in candidate state without a leader before attempting an election, e.g. \"72h\", \"4h35m\". Defaults to 2s", + "type": "string" + }, + "raftHeartbeatTimeout": { + "description": "Specifies the time in follower state without a leader before attempting an election, e.g. \"72h\", \"4h35m\". Defaults to 2s", + "type": "string" + }, + "raftLeaseTimeout": { + "description": "Specifies how long a leader waits without being able to contact a quorum of nodes before stepping down as leader, e.g. \"72h\", \"4h35m\". Defaults to 1s", + "type": "string" + }, "replicas": { "description": "Size is the NATS StatefulSet size", "type": "integer", @@ -542,7 +764,6 @@ "description": "AMQPEventSource refers to an event-source for AMQP stream events", "type": "object", "required": [ - "url", "exchangeName", "exchangeType", "routingKey" @@ -557,11 +778,11 @@ "$ref": "#/definitions/io.argoproj.common.Backoff" }, "consume": { - "description": "Consume holds the configuration to immediately starts delivering queued messages For more information, visit https://godoc.org/github.com/streadway/amqp#Channel.Consume", + "description": "Consume holds the configuration to immediately start delivering queued messages. For more information, visit https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.Consume", "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.AMQPConsumeConfig" }, "exchangeDeclare": { - "description": "ExchangeDeclare holds the configuration for the exchange on the server For more information, visit https://godoc.org/github.com/streadway/amqp#Channel.ExchangeDeclare", + "description": "ExchangeDeclare holds the configuration for the exchange on the server. For more information, visit https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.ExchangeDeclare", "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.AMQPExchangeDeclareConfig" }, "exchangeName": { @@ -572,6 +793,10 @@ "description": "ExchangeType is rabbitmq exchange type", "type": "string" }, + "filter": { + "description": "Filter", + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter" + }, "jsonBody": { "description": "JSONBody specifies that all event body payload coming from this source will be JSON", "type": "boolean" @@ -584,11 +809,11 @@ } }, "queueBind": { - "description": "QueueBind holds the configuration that binds an exchange to a queue so that publishings to the exchange will be routed to the queue when the publishing routing key matches the binding routing key For more information, visit https://godoc.org/github.com/streadway/amqp#Channel.QueueBind", + "description": "QueueBind holds the configuration that binds an exchange to a queue so that publishings to the exchange will be routed to the queue when the publishing routing key matches the binding routing key. For more information, visit https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.QueueBind", "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.AMQPQueueBindConfig" }, "queueDeclare": { - "description": "QueueDeclare holds the configuration of a queue to hold messages and deliver to
consumers. Declaring creates a queue if it doesn't already exist, or ensures that an existing queue matches the same parameters For more information, visit https://godoc.org/github.com/streadway/amqp#Channel.QueueDeclare", + "description": "QueueDeclare holds the configuration of a queue to hold messages and deliver to consumers. Declaring creates a queue if it doesn't already exist, or ensures that an existing queue matches the same parameters. For more information, visit https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.QueueDeclare", "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.AMQPQueueDeclareConfig" }, "routingKey": { @@ -602,6 +827,10 @@ "url": { "description": "URL for rabbitmq service", "type": "string" + }, + "urlSecret": { + "description": "URLSecret is a secret reference for the rabbitmq service URL", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + } } }, @@ -641,6 +870,10 @@ "description": "AMQPQueueDeclareConfig holds the configuration of a queue to hold messages and deliver to consumers. Declaring creates a queue if it doesn't already exist, or ensures that an existing queue matches the same parameters", "type": "object", "properties": { + "arguments": { + "description": "Arguments of a queue (also known as \"x-arguments\") used for optional features and plugins", + "type": "string" + }, "autoDelete": { "description": "AutoDelete removes the queue when no consumers are active", "type": "boolean" @@ -671,6 +904,10 @@ "hubName" ], "properties": { + "filter": { + "description": "Filter", + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter" + }, "fqdn": { "description": "FQDN of the EventHubs namespace you created More info at https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-get-connection-string", "type": "string" @@ -696,23 +933,32 @@ } } }, - "io.argoproj.eventsource.v1alpha1.CalendarEventSource": { - "description": "CalendarEventSource describes a time based dependency. One of the fields (schedule, interval, or recurrence) must be passed. Schedule takes precedence over interval; interval takes precedence over recurrence", + "io.argoproj.eventsource.v1alpha1.AzureQueueStorageEventSource": { + "description": "AzureQueueStorageEventSource describes the event source for azure queue storage. More info at https://learn.microsoft.com/en-us/azure/storage/queues/", "type": "object", "required": [ - "schedule", - "interval" + "queueName" ], "properties": { - "exclusionDates": { - "type": "array", - "items": { - "type": "string" - } + "connectionString": { + "description": "ConnectionString is the connection string to access Azure Queue Storage. If this field is not provided, it will try to access via Azure AD with StorageAccountName.", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" }, - "interval": { - "description": "Interval is a string that describes an interval duration, e.g. 1s, 30m, 2h...", - "type": "string" + "decodeMessage": { + "description": "DecodeMessage specifies if all the messages should be base64 decoded. If set to true the decoding is done before the evaluation of JSONBody", + "type": "boolean" + }, + "dlq": { + "description": "DLQ specifies if a dead-letter queue is configured for messages that can't be processed successfully. If set to true, messages with invalid payload won't be acknowledged, allowing them to be forwarded on to the dead-letter queue.
The default value is false.", + "type": "boolean" + }, + "filter": { + "description": "Filter", + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter" + }, + "jsonBody": { + "description": "JSONBody specifies that all event body payload coming from this source will be JSON", + "type": "boolean" }, "metadata": { "description": "Metadata holds the user defined metadata which will passed along the event payload.", @@ -721,39 +967,332 @@ "type": "string" } }, - "persistence": { - "description": "Persistence hold the configuration for event persistence", - "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventPersistence" - }, - "schedule": { - "description": "Schedule is a cron-like expression. For reference, see: https://en.wikipedia.org/wiki/Cron", + "queueName": { + "description": "QueueName is the name of the queue", "type": "string" }, - "timezone": { - "description": "Timezone in which to run the schedule", + "storageAccountName": { + "description": "StorageAccountName is the name of the storage account where the queue is. This field is necessary to access via Azure AD (managed identity) and it is ignored if ConnectionString is set.", "type": "string" }, - "userPayload": { - "description": "UserPayload will be sent to sensor as extra data once the event is triggered Deprecated: will be removed in v1.5. Please use Metadata instead.", - "type": "string", - "format": "byte" + "waitTimeInSeconds": { + "description": "WaitTimeInSeconds is the duration (in seconds) for which the event source waits between empty results from the queue. The default value is 3 seconds.", + "type": "integer", + "format": "int32" } } }, - "io.argoproj.eventsource.v1alpha1.CatchupConfiguration": { + "io.argoproj.eventsource.v1alpha1.AzureServiceBusEventSource": { + "description": "AzureServiceBusEventSource describes the event source for azure service bus. More info at https://docs.microsoft.com/en-us/azure/service-bus-messaging/", "type": "object", + "required": [ + "queueName", + "topicName", + "subscriptionName" + ], "properties": { - "enabled": { - "description": "Enabled enables to triggered the missed schedule when eventsource restarts", + "connectionString": { + "description": "ConnectionString is the connection string for the Azure Service Bus. If this field is not provided, it will try to access via Azure AD with DefaultAzureCredential and FullyQualifiedNamespace.", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + }, + "filter": { + "description": "Filter", + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter" + }, + "fullyQualifiedNamespace": { + "description": "FullyQualifiedNamespace is the Service Bus namespace name (ex: myservicebus.servicebus.windows.net).
This field is necessary to access via Azure AD (managed identity) and it is ignored if ConnectionString is set.", + "type": "string" + }, + "jsonBody": { + "description": "JSONBody specifies that all event body payload coming from this source will be JSON", "type": "boolean" }, + "metadata": { + "description": "Metadata holds the user defined metadata which will be passed along the event payload.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "queueName": { + "description": "QueueName is the name of the Azure Service Bus Queue", + "type": "string" + }, + "subscriptionName": { + "description": "SubscriptionName is the name of the Azure Service Bus Topic Subscription", + "type": "string" + }, + "tls": { + "description": "TLS configuration for the service bus client", + "$ref": "#/definitions/io.argoproj.common.TLSConfig" + }, + "topicName": { + "description": "TopicName is the name of the Azure Service Bus Topic", + "type": "string" + } + } + }, + "io.argoproj.eventsource.v1alpha1.BitbucketAuth": { + "description": "BitbucketAuth holds the different auth strategies for connecting to Bitbucket", + "type": "object", + "properties": { + "basic": { + "description": "Basic is BasicAuth auth strategy.", + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.BitbucketBasicAuth" + }, + "oauthToken": { + "description": "OAuthToken refers to the K8s secret that holds the OAuth Bearer token.", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + } + } + }, + "io.argoproj.eventsource.v1alpha1.BitbucketBasicAuth": { + "description": "BasicAuth holds the information required to authenticate a user via the basic auth mechanism", + "type": "object", + "required": [ + "username", + "password" + ], + "properties": { + "password": { + "description": "Password refers to the K8s secret that holds the password.", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + }, + "username": { + "description": "Username refers to the K8s secret that holds the username.", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + } + } + }, + "io.argoproj.eventsource.v1alpha1.BitbucketEventSource": { + "description": "BitbucketEventSource describes the event source for Bitbucket", + "type": "object", + "required": [ + "webhook", + "auth", + "events" + ], + "properties": { + "auth": { + "description": "Auth information required to connect to Bitbucket.", + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.BitbucketAuth" + }, + "deleteHookOnFinish": { + "description": "DeleteHookOnFinish determines whether to delete the defined Bitbucket hook once the event source is stopped.", + "type": "boolean" + }, + "events": { + "description": "Events this webhook is subscribed to.", + "type": "array", + "items": { + "type": "string" + } + }, + "filter": { + "description": "Filter", + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter" + }, + "metadata": { + "description": "Metadata holds the user defined metadata which will be passed along the event payload.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "owner": { + "description": "DeprecatedOwner is the owner of the repository. Deprecated: use Repositories instead.
Will be unsupported in v1.9", + "type": "string" + }, + "projectKey": { + "description": "DeprecatedProjectKey is the key of the project to which the repository relates. Deprecated: use Repositories instead. Will be unsupported in v1.9", + "type": "string" + }, + "repositories": { + "description": "Repositories holds a list of repositories for which integration needs to set up", + "type": "array", + "items": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.BitbucketRepository" + } + }, + "repositorySlug": { + "description": "DeprecatedRepositorySlug is a URL-friendly version of a repository name, automatically generated by Bitbucket for use in the URL. Deprecated: use Repositories instead. Will be unsupported in v1.9", + "type": "string" + }, + "webhook": { + "description": "Webhook refers to the configuration required to run an http server", + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.WebhookContext" + } + } + }, + "io.argoproj.eventsource.v1alpha1.BitbucketRepository": { + "type": "object", + "required": [ + "owner", + "repositorySlug" + ], + "properties": { + "owner": { + "description": "Owner is the owner of the repository", + "type": "string" + }, + "repositorySlug": { + "description": "RepositorySlug is a URL-friendly version of a repository name, automatically generated by Bitbucket for use in the URL", + "type": "string" + } + } + }, + "io.argoproj.eventsource.v1alpha1.BitbucketServerEventSource": { + "description": "BitbucketServerEventSource refers to event-source related to Bitbucket Server events", + "type": "object", + "required": [ + "bitbucketserverBaseURL" + ], + "properties": { + "accessToken": { + "description": "AccessToken is a reference to the K8s secret which holds the bitbucket api access information.", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + }, + "bitbucketserverBaseURL": { + "description": "BitbucketServerBaseURL is the base URL for API requests to a custom endpoint.", + "type": "string" + }, + "checkInterval": { + "description": "CheckInterval is a duration in which to wait before checking that the webhooks exist, e.g. 1s, 30m, 2h... (defaults to 1m)", + "type": "string" + }, + "deleteHookOnFinish": { + "description": "DeleteHookOnFinish determines whether to delete the Bitbucket Server hook for the project once the event source is stopped.", + "type": "boolean" + }, + "events": { + "description": "Events are bitbucket events to listen to. Refer https://confluence.atlassian.com/bitbucketserver/event-payload-938025882.html", + "type": "array", + "items": { + "type": "string" + } + }, + "filter": { + "description": "Filter", + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter" + }, + "metadata": { + "description": "Metadata holds the user defined metadata which will be passed along the event payload.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "projectKey": { + "description": "DeprecatedProjectKey is the key of project for which integration needs to set up. Deprecated: use Repositories instead.
Will be unsupported in v1.8.", + "type": "string" + }, + "projects": { + "description": "Projects holds a list of projects for which integration needs to set up, this will add the webhook to all repositories in the project.", + "type": "array", + "items": { + "type": "string" + } + }, + "repositories": { + "description": "Repositories holds a list of repositories for which integration needs to set up.", + "type": "array", + "items": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.BitbucketServerRepository" + } + }, + "repositorySlug": { + "description": "DeprecatedRepositorySlug is the slug of the repository for which integration needs to set up. Deprecated: use Repositories instead. Will be unsupported in v1.8.", + "type": "string" + }, + "skipBranchRefsChangedOnOpenPR": { + "description": "SkipBranchRefsChangedOnOpenPR bypasses the event repo:refs_changed for branches whenever there's an associated open pull request. This helps in optimizing the event handling process by avoiding unnecessary triggers for branch reference changes that are already part of a pull request under review.", + "type": "boolean" + }, + "tls": { + "description": "TLS configuration for the bitbucketserver client.", + "$ref": "#/definitions/io.argoproj.common.TLSConfig" + }, + "webhook": { + "description": "Webhook holds configuration to run an http server.", + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.WebhookContext" + }, + "webhookSecret": { + "description": "WebhookSecret is a reference to the K8s secret which holds the bitbucket webhook secret (for HMAC validation).", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + } + } + }, + "io.argoproj.eventsource.v1alpha1.BitbucketServerRepository": { + "type": "object", + "required": [ + "projectKey", + "repositorySlug" + ], + "properties": { + "projectKey": { + "description": "ProjectKey is the key of project for which integration needs to set up.", + "type": "string" + }, + "repositorySlug": { + "description": "RepositorySlug is the slug of the repository for which integration needs to set up.", + "type": "string" + } + } + }, + "io.argoproj.eventsource.v1alpha1.CalendarEventSource": { + "description": "CalendarEventSource describes a time based dependency. One of the fields (schedule, interval, or recurrence) must be passed. Schedule takes precedence over interval; interval takes precedence over recurrence", + "type": "object", + "properties": { + "exclusionDates": { + "description": "ExclusionDates defines the list of DATE-TIME exceptions for recurring events.", + "type": "array", + "items": { + "type": "string" + } + }, + "filter": { + "description": "Filter", + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter" + }, + "interval": { + "description": "Interval is a string that describes an interval duration, e.g. 1s, 30m, 2h...", + "type": "string" + }, + "metadata": { + "description": "Metadata holds the user defined metadata which will be passed along the event payload.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "persistence": { + "description": "Persistence holds the configuration for event persistence", + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventPersistence" + }, + "schedule": { + "description": "Schedule is a cron-like expression.
+          "description": "Schedule is a cron-like expression. For reference, see: https://en.wikipedia.org/wiki/Cron",
+          "type": "string"
+        },
+        "timezone": {
+          "description": "Timezone in which to run the schedule",
+          "type": "string"
+        }
+      }
+    },
+    "io.argoproj.eventsource.v1alpha1.CatchupConfiguration": {
+      "type": "object",
+      "properties": {
+        "enabled": {
+          "description": "Enabled enables triggering the missed schedules when the eventsource restarts",
+          "type": "boolean"
+        },
+        "maxDuration": {
+          "description": "MaxDuration holds the max catchup duration",
+          "type": "string"
+        }
+      }
+    },
+    "io.argoproj.eventsource.v1alpha1.ConfigMapPersistence": {
      "type": "object",
      "properties": {
        "createIfNotExist": {
@@ -791,6 +1330,10 @@
          "description": "Backoff holds parameters applied to connection.",
          "$ref": "#/definitions/io.argoproj.common.Backoff"
        },
+        "filter": {
+          "description": "Filter",
+          "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter"
+        },
        "jsonBody": {
          "description": "JSONBody specifies that all event body payload coming from this source will be JSON",
          "type": "boolean"
@@ -856,6 +1399,14 @@
        }
      }
    },
+    "io.argoproj.eventsource.v1alpha1.EventSourceFilter": {
+      "type": "object",
+      "properties": {
+        "expression": {
+          "type": "string"
+        }
+      }
+    },
    "io.argoproj.eventsource.v1alpha1.EventSourceList": {
      "description": "EventSourceList is the list of eventsource resources",
      "type": "object",
@@ -901,6 +1452,34 @@
          "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.AzureEventsHubEventSource"
        }
      },
+      "azureQueueStorage": {
+        "description": "AzureQueueStorage event source",
+        "type": "object",
+        "additionalProperties": {
+          "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.AzureQueueStorageEventSource"
+        }
+      },
+      "azureServiceBus": {
+        "description": "Azure Service Bus event source",
+        "type": "object",
+        "additionalProperties": {
+          "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.AzureServiceBusEventSource"
+        }
+      },
+      "bitbucket": {
+        "description": "Bitbucket event sources",
+        "type": "object",
+        "additionalProperties": {
+          "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.BitbucketEventSource"
+        }
+      },
+      "bitbucketserver": {
+        "description": "Bitbucket Server event sources",
+        "type": "object",
+        "additionalProperties": {
+          "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.BitbucketServerEventSource"
+        }
+      },
      "calendar": {
        "description": "Calendar event sources",
        "type": "object",
@@ -933,6 +1512,13 @@
          "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.GenericEventSource"
        }
      },
+      "gerrit": {
+        "description": "Gerrit event source",
+        "type": "object",
+        "additionalProperties": {
+          "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.GerritEventSource"
+        }
+      },
      "github": {
        "description": "Github event sources",
        "type": "object",
@@ -1010,10 +1596,12 @@
          "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.RedisEventSource"
        }
      },
-      "replica": {
-        "description": "DeprecatedReplica is the event source deployment replicas Deprecated: use replicas instead, will be removed in v1.5",
-        "type": "integer",
-        "format": "int32"
+      "redisStream": {
+        "description": "Redis stream source",
+        "type": "object",
+        "additionalProperties": {
+          "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.RedisStreamEventSource"
+        }
      },
      "replicas": {
        "description": "Replicas is the event source deployment replicas",
        "type": "integer",
        "format": "int32"
@@ -1031,6 +1619,13 @@
        "description": "Service is the specifications of the service to expose the event source",
        "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.Service"
      },
+      "sftp": {
+        "description": "SFTP event 
sources", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.SFTPEventSource" + } + }, "slack": { "description": "Slack event sources", "type": "object", @@ -1074,7 +1669,7 @@ "description": "Webhook event sources", "type": "object", "additionalProperties": { - "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.WebhookContext" + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.WebhookEventSource" } } } @@ -1106,6 +1701,10 @@ "description": "Type of file operations to watch Refer https://github.com/fsnotify/fsnotify/blob/master/fsnotify.go for more information", "type": "string" }, + "filter": { + "description": "Filter", + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter" + }, "metadata": { "description": "Metadata holds the user defined metadata which will passed along the event payload.", "type": "object", @@ -1139,6 +1738,10 @@ "description": "Config is the event source configuration", "type": "string" }, + "filter": { + "description": "Filter", + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter" + }, "insecure": { "description": "Insecure determines the type of connection.", "type": "boolean" @@ -1160,41 +1763,131 @@ } } }, - "io.argoproj.eventsource.v1alpha1.GithubEventSource": { - "description": "GithubEventSource refers to event-source for github related events", + "io.argoproj.eventsource.v1alpha1.GerritEventSource": { + "description": "GerritEventSource refers to event-source related to gerrit events", "type": "object", "required": [ - "id", - "owner", - "repository", - "events" + "hookName", + "events", + "gerritBaseURL" ], "properties": { - "active": { - "description": "Active refers to status of the webhook for event deliveries. https://developer.github.com/webhooks/creating/#active", - "type": "boolean" - }, - "apiToken": { - "description": "APIToken refers to a K8s secret containing github api token", - "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" - }, - "contentType": { - "description": "ContentType of the event delivery", - "type": "string" + "auth": { + "description": "Auth hosts secret selectors for username and password", + "$ref": "#/definitions/io.argoproj.common.BasicAuth" }, "deleteHookOnFinish": { - "description": "DeleteHookOnFinish determines whether to delete the GitHub hook for the repository once the event source is stopped.", + "description": "DeleteHookOnFinish determines whether to delete the Gerrit hook for the project once the event source is stopped.", "type": "boolean" }, "events": { + "description": "Events are gerrit event to listen to. 
Refer https://gerrit-review.googlesource.com/Documentation/cmd-stream-events.html#events", "type": "array", "items": { "type": "string" } }, - "githubBaseURL": { - "description": "GitHub base URL (for GitHub Enterprise)", - "type": "string" + "filter": { + "description": "Filter", + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter" + }, + "gerritBaseURL": { + "description": "GerritBaseURL is the base URL for API requests to a custom endpoint", + "type": "string" + }, + "hookName": { + "description": "HookName is the name of the webhook", + "type": "string" + }, + "metadata": { + "description": "Metadata holds the user defined metadata which will passed along the event payload.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "projects": { + "description": "List of project namespace paths like \"whynowy/test\".", + "type": "array", + "items": { + "type": "string" + } + }, + "sslVerify": { + "description": "SslVerify to enable ssl verification", + "type": "boolean" + }, + "webhook": { + "description": "Webhook holds configuration to run a http server", + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.WebhookContext" + } + } + }, + "io.argoproj.eventsource.v1alpha1.GithubAppCreds": { + "type": "object", + "required": [ + "privateKey", + "appID", + "installationID" + ], + "properties": { + "appID": { + "description": "AppID refers to the GitHub App ID for the application you created", + "type": "integer", + "format": "int64" + }, + "installationID": { + "description": "InstallationID refers to the Installation ID of the GitHub app you created and installed", + "type": "integer", + "format": "int64" + }, + "privateKey": { + "description": "PrivateKey refers to a K8s secret containing the GitHub app private key", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + } + } + }, + "io.argoproj.eventsource.v1alpha1.GithubEventSource": { + "description": "GithubEventSource refers to event-source for github related events", + "type": "object", + "required": [ + "events" + ], + "properties": { + "active": { + "description": "Active refers to status of the webhook for event deliveries. https://developer.github.com/webhooks/creating/#active", + "type": "boolean" + }, + "apiToken": { + "description": "APIToken refers to a K8s secret containing github api token", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + }, + "contentType": { + "description": "ContentType of the event delivery", + "type": "string" + }, + "deleteHookOnFinish": { + "description": "DeleteHookOnFinish determines whether to delete the GitHub hook for the repository once the event source is stopped.", + "type": "boolean" + }, + "events": { + "description": "Events refer to Github events to which the event source will subscribe", + "type": "array", + "items": { + "type": "string" + } + }, + "filter": { + "description": "Filter", + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter" + }, + "githubApp": { + "description": "GitHubApp holds the GitHub app credentials", + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.GithubAppCreds" + }, + "githubBaseURL": { + "description": "GitHub base URL (for GitHub Enterprise)", + "type": "string" }, "githubUploadURL": { "description": "GitHub upload URL (for GitHub Enterprise)", @@ -1216,12 +1909,23 @@ "type": "string" } }, + "organizations": { + "description": "Organizations holds the names of organizations (used for organization level webhooks). 
Not required if Repositories is set.", + "type": "array", + "items": { + "type": "string" + } + }, "owner": { "description": "DeprecatedOwner refers to GitHub owner name i.e. argoproj Deprecated: use Repositories instead. Will be unsupported in v 1.6", "type": "string" }, + "payloadEnrichment": { + "description": "PayloadEnrichment holds flags that determine whether to enrich GitHub's original payload with additional information.", + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.PayloadEnrichmentFlags" + }, "repositories": { - "description": "Repositories holds the information of repositories, which uses repo owner as the key, and list of repo names as the value", + "description": "Repositories holds the information of repositories, which uses repo owner as the key, and list of repo names as the value. Not required if Organizations is set.", "type": "array", "items": { "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.OwnedRepositories" @@ -1245,13 +1949,12 @@ "description": "GitlabEventSource refers to event-source related to Gitlab events", "type": "object", "required": [ - "projectID", "events", "gitlabBaseURL" ], "properties": { "accessToken": { - "description": "AccessToken is reference to k8 secret which holds the gitlab api access information", + "description": "AccessToken references to k8 secret which holds the gitlab api access information", "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" }, "deleteHookOnFinish": { @@ -1269,10 +1972,21 @@ "type": "string" } }, + "filter": { + "description": "Filter", + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter" + }, "gitlabBaseURL": { "description": "GitlabBaseURL is the base URL for API requests to a custom endpoint", "type": "string" }, + "groups": { + "description": "List of group IDs or group name like \"test\". Group level hook available in Premium and Ultimate Gitlab.", + "type": "array", + "items": { + "type": "string" + } + }, "metadata": { "description": "Metadata holds the user defined metadata which will passed along the event payload.", "type": "object", @@ -1281,9 +1995,20 @@ } }, "projectID": { - "description": "ProjectID is the id of project for which integration needs to setup", + "description": "DeprecatedProjectID is the id of project for which integration needs to setup Deprecated: use Projects instead. Will be unsupported in v 1.7", "type": "string" }, + "projects": { + "description": "List of project IDs or project namespace paths like \"whynowy/test\". Projects and groups cannot be empty at the same time.", + "type": "array", + "items": { + "type": "string" + } + }, + "secretToken": { + "description": "SecretToken references to k8 secret which holds the Secret Token used by webhook config", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + }, "webhook": { "description": "Webhook holds configuration to run a http server", "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.WebhookContext" @@ -1313,6 +2038,10 @@ "description": "Directory to watch for events", "type": "string" }, + "filter": { + "description": "Filter", + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter" + }, "hdfsUser": { "description": "HDFSUser is the user to access HDFS file system. It is ignored if either ccache or keytab is used.", "type": "string" @@ -1387,10 +2116,13 @@ "type": "object", "required": [ "url", - "partition", "topic" ], "properties": { + "config": { + "description": "Yaml format Sarama config for Kafka connection. 
It follows the struct of sarama.Config. See https://github.com/IBM/sarama/blob/main/config.go e.g.\n\nconsumer:\n fetch:\n min: 1\nnet:\n MaxOpenRequests: 5", + "type": "string" + }, "connectionBackoff": { "description": "Backoff holds parameters applied to connection.", "$ref": "#/definitions/io.argoproj.common.Backoff" @@ -1399,6 +2131,10 @@ "description": "Consumer group for kafka client", "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.KafkaConsumerGroup" }, + "filter": { + "description": "Filter", + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter" + }, "jsonBody": { "description": "JSONBody specifies that all event body payload coming from this source will be JSON", "type": "boolean" @@ -1450,6 +2186,10 @@ "clientId" ], "properties": { + "auth": { + "description": "Auth hosts secret selectors for username and password", + "$ref": "#/definitions/io.argoproj.common.BasicAuth" + }, "clientId": { "description": "ClientID is the id of the client", "type": "string" @@ -1458,6 +2198,10 @@ "description": "ConnectionBackoff holds backoff applied to connection.", "$ref": "#/definitions/io.argoproj.common.Backoff" }, + "filter": { + "description": "Filter", + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter" + }, "jsonBody": { "description": "JSONBody specifies that all event body payload coming from this source will be JSON", "type": "boolean" @@ -1521,6 +2265,10 @@ "description": "ConnectionBackoff holds backoff applied to connection.", "$ref": "#/definitions/io.argoproj.common.Backoff" }, + "filter": { + "description": "Filter", + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter" + }, "jsonBody": { "description": "JSONBody specifies that all event body payload coming from this source will be JSON", "type": "boolean" @@ -1532,6 +2280,10 @@ "type": "string" } }, + "queue": { + "description": "Queue is the name of the queue group to subscribe as if specified. Uses QueueSubscribe logic to subscribe as queue group. If the queue is empty, uses default Subscribe logic.", + "type": "string" + }, "subject": { "description": "Subject holds the name of the subject onto which messages are published", "type": "string" @@ -1563,6 +2315,10 @@ "description": "Backoff holds parameters applied to connection.", "$ref": "#/definitions/io.argoproj.common.Backoff" }, + "filter": { + "description": "Filter", + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter" + }, "hostAddress": { "description": "HostAddress is the address of the host for NSQ lookup", "type": "string" @@ -1599,30 +2355,36 @@ } }, "owner": { - "description": "Orgnization or user name", + "description": "Organization or user name", "type": "string" } } }, + "io.argoproj.eventsource.v1alpha1.PayloadEnrichmentFlags": { + "type": "object", + "properties": { + "fetchPROnPRCommentAdded": { + "description": "FetchPROnPRCommentAdded determines whether to enrich the payload provided by GitHub on \"pull request comment added\" events, with the full pull request info", + "type": "boolean" + } + } + }, "io.argoproj.eventsource.v1alpha1.PubSubEventSource": { "description": "PubSubEventSource refers to event-source for GCP PubSub related events.", "type": "object", - "required": [ - "credentialsFile" - ], "properties": { "credentialSecret": { "description": "CredentialSecret references to the secret that contains JSON credentials to access GCP. If it is missing, it implicitly uses Workload Identity to access. 
https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity", "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" }, - "credentialsFile": { - "description": "CredentialsFile is the file that contains credentials to authenticate for GCP Deprecated: will be removed in v1.5, use CredentialSecret instead", - "type": "string" - }, "deleteSubscriptionOnFinish": { "description": "DeleteSubscriptionOnFinish determines whether to delete the GCP PubSub subscription once the event source is stopped.", "type": "boolean" }, + "filter": { + "description": "Filter", + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter" + }, "jsonBody": { "description": "JSONBody specifies that all event body payload coming from this source will be JSON", "type": "boolean" @@ -1660,10 +2422,29 @@ "url" ], "properties": { + "authAthenzParams": { + "description": "Authentication athenz parameters for the pulsar client. Refer https://github.com/apache/pulsar-client-go/blob/master/pulsar/auth/athenz.go Either token or athenz can be set to use auth.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "authAthenzSecret": { + "description": "Authentication athenz privateKey secret for the pulsar client. AuthAthenzSecret must be set if AuthAthenzParams is used.", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + }, + "authTokenSecret": { + "description": "Authentication token for the pulsar client. Either token or athenz can be set to use auth.", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + }, "connectionBackoff": { "description": "Backoff holds parameters applied to connection.", "$ref": "#/definitions/io.argoproj.common.Backoff" }, + "filter": { + "description": "Filter", + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter" + }, "jsonBody": { "description": "JSONBody specifies that all event body payload coming from this source will be JSON", "type": "boolean" @@ -1727,10 +2508,18 @@ "type": "integer", "format": "int32" }, + "filter": { + "description": "Filter", + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter" + }, "hostAddress": { "description": "HostAddress refers to the address of the Redis host/server", "type": "string" }, + "jsonBody": { + "description": "JSONBody specifies that all event body payload coming from this source will be JSON", + "type": "boolean" + }, "metadata": { "description": "Metadata holds the user defined metadata which will passed along the event payload.", "type": "object", @@ -1749,6 +2538,68 @@ "tls": { "description": "TLS configuration for the redis client.", "$ref": "#/definitions/io.argoproj.common.TLSConfig" + }, + "username": { + "description": "Username required for ACL style authentication if any.", + "type": "string" + } + } + }, + "io.argoproj.eventsource.v1alpha1.RedisStreamEventSource": { + "description": "RedisStreamEventSource describes an event source for Redis streams (https://redis.io/topics/streams-intro)", + "type": "object", + "required": [ + "hostAddress", + "streams" + ], + "properties": { + "consumerGroup": { + "description": "ConsumerGroup refers to the Redis stream consumer group that will be created on all redis streams. Messages are read through this group. Defaults to 'argo-events-cg'", + "type": "string" + }, + "db": { + "description": "DB to use. 
If not specified, default DB 0 will be used.", + "type": "integer", + "format": "int32" + }, + "filter": { + "description": "Filter", + "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter" + }, + "hostAddress": { + "description": "HostAddress refers to the address of the Redis host/server (master instance)", + "type": "string" + }, + "maxMsgCountPerRead": { + "description": "MaxMsgCountPerRead holds the maximum number of messages per stream that will be read in each XREADGROUP of all streams Example: if there are 2 streams and MaxMsgCountPerRead=10, then each XREADGROUP may read upto a total of 20 messages. Same as COUNT option in XREADGROUP(https://redis.io/topics/streams-intro). Defaults to 10", + "type": "integer", + "format": "int32" + }, + "metadata": { + "description": "Metadata holds the user defined metadata which will passed along the event payload.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "password": { + "description": "Password required for authentication if any.", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + }, + "streams": { + "description": "Streams to look for entries. XREADGROUP is used on all streams using a single consumer group.", + "type": "array", + "items": { + "type": "string" + } + }, + "tls": { + "description": "TLS configuration for the redis client.", + "$ref": "#/definitions/io.argoproj.common.TLSConfig" + }, + "username": { + "description": "Username required for ACL style authentication if any.", + "type": "string" } } }, @@ -1760,9 +2611,14 @@ "group", "version", "resource", - "eventTypes" + "eventTypes", + "cluster" ], "properties": { + "cluster": { + "description": "Cluster from which events will be listened to", + "type": "string" + }, "eventTypes": { "description": "EventTypes is the list of event type to watch. Possible values are - ADD, UPDATE and DELETE.", "type": "array", @@ -1797,7 +2653,7 @@ } }, "io.argoproj.eventsource.v1alpha1.ResourceFilter": { - "description": "ResourceFilter contains K8 ObjectMeta information to further filter resource event objects", + "description": "ResourceFilter contains K8s ObjectMeta information to further filter resource event objects", "type": "object", "properties": { "afterStart": { @@ -1816,7 +2672,7 @@ } }, "labels": { - "description": "Labels provide listing options to K8s API to watch resource/s. Refer https://kubernetes.io/docs/concepts/overview/working-with-objects/label-selectors/ for more info.", + "description": "Labels provide listing options to K8s API to watch resource/s. Refer https://kubernetes.io/docs/concepts/overview/working-with-objects/label-selectors/ for more info. Unlike K8s field selector, multiple values are passed as comma separated values instead of list of values. Eg: value: value1,value2. 
Same as K8s label selector, operator \"=\", \"==\", \"!=\", \"exists\", \"!\", \"notin\", \"in\", \"gt\" and \"lt\" are supported",
          "type": "array",
          "items": {
            "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.Selector"
@@ -1828,6 +2684,55 @@
        }
      }
    },
+    "io.argoproj.eventsource.v1alpha1.SFTPEventSource": {
+      "description": "SFTPEventSource describes an event-source for sftp related events.",
+      "type": "object",
+      "required": [
+        "eventType",
+        "watchPathConfig"
+      ],
+      "properties": {
+        "address": {
+          "description": "Address is the sftp address.",
+          "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
+        },
+        "eventType": {
+          "description": "Type of file operations to watch. Refer https://github.com/fsnotify/fsnotify/blob/master/fsnotify.go for more information",
+          "type": "string"
+        },
+        "filter": {
+          "description": "Filter",
+          "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter"
+        },
+        "metadata": {
+          "description": "Metadata holds the user defined metadata which will be passed along with the event payload.",
+          "type": "object",
+          "additionalProperties": {
+            "type": "string"
+          }
+        },
+        "password": {
+          "description": "Password required for authentication if any.",
+          "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
+        },
+        "pollIntervalDuration": {
+          "description": "PollIntervalDuration is the interval at which to poll the SFTP server; defaults to 10 seconds",
+          "type": "string"
+        },
+        "sshKeySecret": {
+          "description": "SSHKeySecret refers to the secret that contains the SSH key. The key needs to contain the private key and the public key.",
+          "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
+        },
+        "username": {
+          "description": "Username required for authentication if any.",
+          "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
+        },
+        "watchPathConfig": {
+          "description": "WatchPathConfig contains configuration about the file path to watch",
+          "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.WatchPathConfig"
+        }
+      }
+    },
    "io.argoproj.eventsource.v1alpha1.SNSEventSource": {
      "description": "SNSEventSource refers to event-source for AWS SNS related events",
      "type": "object",
@@ -1837,9 +2742,17 @@
      ],
      "properties": {
        "accessKey": {
-          "description": "AccessKey refers K8 secret containing aws access key",
+          "description": "AccessKey refers K8s secret containing aws access key",
          "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
        },
+        "endpoint": {
+          "description": "Endpoint configures connection to a specific SNS endpoint instead of Amazon's servers",
+          "type": "string"
+        },
+        "filter": {
+          "description": "Filter",
+          "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter"
+        },
        "metadata": {
          "description": "Metadata holds the user defined metadata which will passed along the event payload.",
          "type": "object",
@@ -1856,7 +2769,7 @@
          "type": "string"
        },
        "secretKey": {
-          "description": "SecretKey refers K8 secret containing aws secret key",
+          "description": "SecretKey refers K8s secret containing aws secret key",
          "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
        },
        "topicArn": {
@@ -1883,9 +2796,21 @@
      ],
      "properties": {
        "accessKey": {
-          "description": "AccessKey refers K8 secret containing aws access key",
+          "description": "AccessKey refers K8s secret containing aws access key",
          "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
        },
+        "dlq": {
+          "description": "DLQ specifies if a dead-letter queue is configured for messages that can't be processed successfully. If set to true, messages with an invalid payload won't be acknowledged, allowing them to be forwarded to the dead-letter queue. The default value is false.",
+          "type": "boolean"
+        },
+        "endpoint": {
+          "description": "Endpoint configures connection to a specific SQS endpoint instead of Amazon's servers",
+          "type": "string"
+        },
+        "filter": {
+          "description": "Filter",
+          "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter"
+        },
        "jsonBody": {
          "description": "JSONBody specifies that all event body payload coming from this source will be JSON",
          "type": "boolean"
@@ -1914,7 +2839,11 @@
          "type": "string"
        },
        "secretKey": {
-          "description": "SecretKey refers K8 secret containing aws secret key",
+          "description": "SecretKey refers K8s secret containing aws secret key",
+          "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
+        },
+        "sessionToken": {
+          "description": "SessionToken refers to a K8s secret containing the AWS temporary credentials (STS) session token",
+          "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
        },
        "waitTimeSeconds": {
@@ -1937,7 +2866,7 @@
          "type": "string"
        },
        "operation": {
-          "description": "Supported operations like ==, !=, \u003c=, \u003e= etc. Defaults to ==. Refer https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors for more info.",
+          "description": "Supported operations like ==, != etc. Defaults to ==. Refer https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors for more info.",
          "type": "string"
        },
        "value": {
@@ -1974,6 +2903,10 @@
      "description": "SlackEventSource refers to event-source for Slack related events",
      "type": "object",
      "properties": {
+        "filter": {
+          "description": "Filter",
+          "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter"
+        },
        "metadata": {
          "description": "Metadata holds the user defined metadata which will passed along the event payload.",
          "type": "object",
@@ -2201,6 +3134,11 @@
          "description": "REST API endpoint",
          "type": "string"
        },
+        "maxPayloadSize": {
+          "description": "MaxPayloadSize is the maximum webhook payload size that the server will accept. Requests exceeding that limit will be rejected with a \"request too large\" response. Default value: 1048576 (1MB).",
+          "type": "integer",
+          "format": "int64"
+        },
        "metadata": {
          "description": "Metadata holds the user defined metadata which will passed along the event payload.",
          "type": "object",
@@ -2216,17 +3154,65 @@
          "description": "Port on which HTTP server is listening for incoming events.",
          "type": "string"
        },
-        "serverCertPath": {
-          "description": "DeprecatedServerCertPath refers the file that contains the cert.",
-          "type": "string"
-        },
        "serverCertSecret": {
          "description": "ServerCertPath refers the file that contains the cert.",
          "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
        },
-        "serverKeyPath": {
-          "description": "DeprecatedServerKeyPath refers the file that contains private key",
-          "type": "string"
+        "serverKeySecret": {
+          "description": "ServerKeySecret refers to the secret that contains the private key",
+          "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
+        },
+        "url": {
+          "description": "URL is the url of the server.",
+          "type": "string"
+        }
+      }
+    },
+    "io.argoproj.eventsource.v1alpha1.WebhookEventSource": {
+      "description": "WebhookEventSource describes an HTTP based EventSource",
+      "type": "object",
+      "required": [
+        "endpoint",
+        "method",
+        "port",
+        "url"
+      ],
+      "properties": {
+        "authSecret": {
+          "description": "AuthSecret holds a secret selector that contains a bearer token for authentication",
+          "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector"
+        },
+        "endpoint": {
+          "description": "REST API endpoint",
+          "type": "string"
+        },
+        "filter": {
+          "description": "Filter",
+          "$ref": "#/definitions/io.argoproj.eventsource.v1alpha1.EventSourceFilter"
+        },
+        "maxPayloadSize": {
+          "description": "MaxPayloadSize is the maximum webhook payload size that the server will accept. Requests exceeding that limit will be rejected with a \"request too large\" response. Default value: 1048576 (1MB).",
+          "type": "integer",
+          "format": "int64"
+        },
+        "metadata": {
+          "description": "Metadata holds the user defined metadata which will be passed along with the event payload.",
+          "type": "object",
+          "additionalProperties": {
+            "type": "string"
+          }
+        },
+        "method": {
+          "description": "Method is the HTTP request method that indicates the desired action to be performed for a given resource. 
See RFC7231 Hypertext Transfer Protocol (HTTP/1.1): Semantics and Content", + "type": "string" + }, + "port": { + "description": "Port on which HTTP server is listening for incoming events.", + "type": "string" + }, + "serverCertSecret": { + "description": "ServerCertPath refers the file that contains the cert.", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" }, "serverKeySecret": { "description": "ServerKeyPath refers the file that contains private key", @@ -2248,7 +3234,7 @@ ], "properties": { "accessKey": { - "description": "AccessKey refers K8 secret containing aws access key", + "description": "AccessKey refers K8s secret containing aws access key", "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" }, "functionName": { @@ -2277,8 +3263,12 @@ "description": "Region is AWS region", "type": "string" }, + "roleARN": { + "description": "RoleARN is the Amazon Resource Name (ARN) of the role to assume.", + "type": "string" + }, "secretKey": { - "description": "SecretKey refers K8 secret containing aws secret key", + "description": "SecretKey refers K8s secret containing aws secret key", "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" } } @@ -2286,14 +3276,13 @@ "io.argoproj.sensor.v1alpha1.ArgoWorkflowTrigger": { "description": "ArgoWorkflowTrigger is the trigger for the Argo Workflow", "type": "object", - "required": [ - "group", - "version", - "resource" - ], "properties": { - "group": { - "type": "string" + "args": { + "description": "Args is the list of arguments to pass to the argo CLI", + "type": "array", + "items": { + "type": "string" + } }, "operation": { "description": "Operation refers to the type of operation performed on the argo workflow resource. Default value is Submit.", @@ -2306,15 +3295,9 @@ "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TriggerParameter" } }, - "resource": { - "type": "string" - }, "source": { - "description": "Source of the K8 resource file(s)", + "description": "Source of the K8s resource file(s)", "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.ArtifactLocation" - }, - "version": { - "type": "string" } } }, @@ -2394,6 +3377,72 @@ } } }, + "io.argoproj.sensor.v1alpha1.AzureServiceBusTrigger": { + "type": "object", + "required": [ + "queueName", + "topicName", + "subscriptionName", + "payload" + ], + "properties": { + "connectionString": { + "description": "ConnectionString is the connection string for the Azure Service Bus", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + }, + "parameters": { + "description": "Parameters is the list of key-value extracted from event's payload that are applied to the trigger resource.", + "type": "array", + "items": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TriggerParameter" + } + }, + "payload": { + "description": "Payload is the list of key-value extracted from an event payload to construct the request payload.", + "type": "array", + "items": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TriggerParameter" + } + }, + "queueName": { + "description": "QueueName is the name of the Azure Service Bus Queue", + "type": "string" + }, + "subscriptionName": { + "description": "SubscriptionName is the name of the Azure Service Bus Topic Subscription", + "type": "string" + }, + "tls": { + "description": "TLS configuration for the service bus client", + "$ref": "#/definitions/io.argoproj.common.TLSConfig" + }, + "topicName": { + "description": "TopicName is the name of the Azure Service Bus Topic", + "type": "string" + } + } + }, + 
"io.argoproj.sensor.v1alpha1.ConditionsResetByTime": { + "type": "object", + "properties": { + "cron": { + "description": "Cron is a cron-like expression. For reference, see: https://en.wikipedia.org/wiki/Cron", + "type": "string" + }, + "timezone": { + "type": "string" + } + } + }, + "io.argoproj.sensor.v1alpha1.ConditionsResetCriteria": { + "type": "object", + "properties": { + "byTime": { + "description": "Schedule is a cron-like expression. For reference, see: https://en.wikipedia.org/wiki/Cron", + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.ConditionsResetByTime" + } + } + }, "io.argoproj.sensor.v1alpha1.CustomTrigger": { "description": "CustomTrigger refers to the specification of the custom trigger.", "type": "object", @@ -2404,10 +3453,6 @@ "payload" ], "properties": { - "certFilePath": { - "description": "DeprecatedCertFilePath is path to the cert file within sensor for secure connection between sensor and custom trigger gRPC server. Deprecated: will be removed in v1.5, use CertSecret instead", - "type": "string" - }, "certSecret": { "description": "CertSecret refers to the secret that contains cert for secure connection between sensor and custom trigger gRPC server.", "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" @@ -2481,23 +3526,51 @@ } } }, - "io.argoproj.sensor.v1alpha1.DependencyGroup": { - "description": "DependencyGroup is the group of dependencies", + "io.argoproj.sensor.v1alpha1.EmailTrigger": { + "description": "EmailTrigger refers to the specification of the email notification trigger.", "type": "object", - "required": [ - "name", - "dependencies" - ], "properties": { - "dependencies": { - "description": "Dependencies of events", + "body": { + "description": "Body refers to the body/content of the email send.", + "type": "string" + }, + "from": { + "description": "From refers to the address from which the email is send from.", + "type": "string" + }, + "host": { + "description": "Host refers to the smtp host url to which email is send.", + "type": "string" + }, + "parameters": { + "description": "Parameters is the list of key-value extracted from event's payload that are applied to the trigger resource.", + "type": "array", + "items": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TriggerParameter" + } + }, + "port": { + "description": "Port refers to the smtp server port to which email is send. Defaults to 0.", + "type": "integer", + "format": "int32" + }, + "smtpPassword": { + "description": "SMTPPassword refers to the Kubernetes secret that holds the smtp password used to connect to smtp server.", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + }, + "subject": { + "description": "Subject refers to the subject line for the email send.", + "type": "string" + }, + "to": { + "description": "To refers to the email addresses to which the emails are send.", "type": "array", "items": { "type": "string" } }, - "name": { - "description": "Name of the group", + "username": { + "description": "Username refers to the username used to connect to the smtp server.", "type": "string" } } @@ -2582,9 +3655,17 @@ "description": "Filters and rules governing toleration of success and constraints on the context and data of an event", "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.EventDependencyFilter" }, + "filtersLogicalOperator": { + "description": "FiltersLogicalOperator defines how different filters are evaluated together. 
Available values: and (\u0026\u0026), or (||) Is optional and if left blank treated as and (\u0026\u0026).", + "type": "string" + }, "name": { "description": "Name is a unique name of this dependency", "type": "string" + }, + "transform": { + "description": "Transform transforms the event data", + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.EventDependencyTransformer" } } }, @@ -2603,6 +3684,14 @@ "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.DataFilter" } }, + "dataLogicalOperator": { + "description": "DataLogicalOperator defines how multiple Data filters (if defined) are evaluated together. Available values: and (\u0026\u0026), or (||) Is optional and if left blank treated as and (\u0026\u0026).", + "type": "string" + }, + "exprLogicalOperator": { + "description": "ExprLogicalOperator defines how multiple Exprs filters (if defined) are evaluated together. Available values: and (\u0026\u0026), or (||) Is optional and if left blank treated as and (\u0026\u0026).", + "type": "string" + }, "exprs": { "description": "Exprs contains the list of expressions evaluated against the event payload.", "type": "array", @@ -2610,12 +3699,30 @@ "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.ExprFilter" } }, + "script": { + "description": "Script refers to a Lua script evaluated to determine the validity of an event.", + "type": "string" + }, "time": { "description": "Time filter on the event with escalation", "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TimeFilter" } } }, + "io.argoproj.sensor.v1alpha1.EventDependencyTransformer": { + "description": "EventDependencyTransformer transforms the event", + "type": "object", + "properties": { + "jq": { + "description": "JQ holds the jq command applied for transformation", + "type": "string" + }, + "script": { + "description": "Script refers to a Lua script used to transform the event", + "type": "string" + } + } + }, "io.argoproj.sensor.v1alpha1.ExprFilter": { "type": "object", "required": [ @@ -2670,6 +3777,10 @@ "description": "Path to file that contains trigger resource definition", "type": "string" }, + "insecureIgnoreHostKey": { + "description": "Whether to ignore host key", + "type": "boolean" + }, "ref": { "description": "Ref to use to pull trigger resource. Will result in a shallow clone and fetch.", "type": "string" @@ -2678,10 +3789,6 @@ "description": "Remote to manage set of tracked repositories. Defaults to \"origin\". Refer https://git-scm.com/docs/git-remote", "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.GitRemoteConfig" }, - "sshKeyPath": { - "description": "DeprecatedSSHKeyPath is path to your ssh key path. Use this if you don't want to provide username and password. ssh key path must be mounted in sensor pod. Deprecated: will be removed in v1.5, use SSHKeySecret instead.", - "type": "string" - }, "sshKeySecret": { "description": "SSHKeySecret refers to the secret that contains SSH key", "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" @@ -2818,7 +3925,6 @@ "required": [ "url", "topic", - "partition", "payload" ], "properties": { @@ -2839,12 +3945,12 @@ } }, "partition": { - "description": "Partition to write data to.", + "description": "DEPRECATED", "type": "integer", "format": "int32" }, "partitioningKey": { - "description": "The partitioning key for the messages put on the Kafka topic. 
Defaults to broker url.", + "description": "The partitioning key for the messages put on the Kafka topic.", "type": "string" }, "payload": { @@ -2863,6 +3969,10 @@ "description": "SASL configuration for the kafka client", "$ref": "#/definitions/io.argoproj.common.SASLConfig" }, + "schemaRegistry": { + "description": "Schema Registry configuration to producer message with avro format", + "$ref": "#/definitions/io.argoproj.common.SchemaRegistryConfig" + }, "tls": { "description": "TLS configuration for the Kafka producer.", "$ref": "#/definitions/io.argoproj.common.TLSConfig" @@ -2989,6 +4099,87 @@ } } }, + "io.argoproj.sensor.v1alpha1.PulsarTrigger": { + "description": "PulsarTrigger refers to the specification of the Pulsar trigger.", + "type": "object", + "required": [ + "url", + "topic", + "payload" + ], + "properties": { + "authAthenzParams": { + "description": "Authentication athenz parameters for the pulsar client. Refer https://github.com/apache/pulsar-client-go/blob/master/pulsar/auth/athenz.go Either token or athenz can be set to use auth.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "authAthenzSecret": { + "description": "Authentication athenz privateKey secret for the pulsar client. AuthAthenzSecret must be set if AuthAthenzParams is used.", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + }, + "authTokenSecret": { + "description": "Authentication token for the pulsar client. Either token or athenz can be set to use auth.", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + }, + "connectionBackoff": { + "description": "Backoff holds parameters applied to connection.", + "$ref": "#/definitions/io.argoproj.common.Backoff" + }, + "parameters": { + "description": "Parameters is the list of parameters that is applied to resolved Kafka trigger object.", + "type": "array", + "items": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TriggerParameter" + } + }, + "payload": { + "description": "Payload is the list of key-value extracted from an event payload to construct the request payload.", + "type": "array", + "items": { + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TriggerParameter" + } + }, + "tls": { + "description": "TLS configuration for the pulsar client.", + "$ref": "#/definitions/io.argoproj.common.TLSConfig" + }, + "tlsAllowInsecureConnection": { + "description": "Whether the Pulsar client accept untrusted TLS certificate from broker.", + "type": "boolean" + }, + "tlsTrustCertsSecret": { + "description": "Trusted TLS certificate secret.", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + }, + "tlsValidateHostname": { + "description": "Whether the Pulsar client verify the validity of the host name from broker.", + "type": "boolean" + }, + "topic": { + "description": "Name of the topic. 
See https://pulsar.apache.org/docs/en/concepts-messaging/", + "type": "string" + }, + "url": { + "description": "Configure the service URL for the Pulsar service.", + "type": "string" + } + } + }, + "io.argoproj.sensor.v1alpha1.RateLimit": { + "type": "object", + "properties": { + "requestsPerUnit": { + "type": "integer", + "format": "int32" + }, + "unit": { + "description": "Defaults to Second", + "type": "string" + } + } + }, "io.argoproj.sensor.v1alpha1.Sensor": { "description": "Sensor is the definition of a sensor resource", "type": "object", @@ -3051,10 +4242,6 @@ "triggers" ], "properties": { - "circuit": { - "description": "Circuit is a boolean expression of dependency groups Deprecated: will be removed in v1.5, use Switch in triggers instead.", - "type": "string" - }, "dependencies": { "description": "Dependencies is a list of the events that this sensor is dependent on.", "type": "array", @@ -3062,13 +4249,6 @@ "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.EventDependency" } }, - "dependencyGroups": { - "description": "DependencyGroups is a list of the groups of events.", - "type": "array", - "items": { - "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.DependencyGroup" - } - }, "errorOnFailedRound": { "description": "ErrorOnFailedRound if set to true, marks sensor state as `error` if the previous trigger round fails. Once sensor state is set to `error`, no further triggers will be processed.", "type": "boolean" @@ -3077,11 +4257,23 @@ "description": "EventBusName references to a EventBus name. By default the value is \"default\"", "type": "string" }, + "loggingFields": { + "description": "LoggingFields add additional key-value pairs when logging happens", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, "replicas": { "description": "Replicas is the sensor deployment replicas", "type": "integer", "format": "int32" }, + "revisionHistoryLimit": { + "description": "RevisionHistoryLimit specifies how many old deployment revisions to retain", + "type": "integer", + "format": "int32" + }, "template": { "description": "Template is the pod specification for the sensor", "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.Template" @@ -3110,12 +4302,46 @@ } } }, - "io.argoproj.sensor.v1alpha1.SlackTrigger": { - "description": "SlackTrigger refers to the specification of the slack notification trigger.", + "io.argoproj.sensor.v1alpha1.SlackSender": { "type": "object", "properties": { - "channel": { - "description": "Channel refers to which Slack channel to send slack message.", + "icon": { + "description": "Icon is the Slack application's icon, e.g. 
:robot_face: or https://example.com/image.png", + "type": "string" + }, + "username": { + "description": "Username is the Slack application's username", + "type": "string" + } + } + }, + "io.argoproj.sensor.v1alpha1.SlackThread": { + "type": "object", + "properties": { + "broadcastMessageToChannel": { + "description": "BroadcastMessageToChannel allows to also broadcast the message from the thread to the channel", + "type": "boolean" + }, + "messageAggregationKey": { + "description": "MessageAggregationKey allows to aggregate the messages to a thread by some key.", + "type": "string" + } + } + }, + "io.argoproj.sensor.v1alpha1.SlackTrigger": { + "description": "SlackTrigger refers to the specification of the slack notification trigger.", + "type": "object", + "properties": { + "attachments": { + "description": "Attachments is a JSON format string that represents an array of Slack attachments according to the attachments API: https://api.slack.com/reference/messaging/attachments .", + "type": "string" + }, + "blocks": { + "description": "Blocks is a JSON format string that represents an array of Slack blocks according to the blocks API: https://api.slack.com/reference/block-kit/blocks .", + "type": "string" + }, + "channel": { + "description": "Channel refers to which Slack channel to send Slack message.", "type": "string" }, "message": { @@ -3129,24 +4355,24 @@ "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TriggerParameter" } }, + "sender": { + "description": "Sender refers to additional configuration of the Slack application that sends the message.", + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.SlackSender" + }, "slackToken": { "description": "SlackToken refers to the Kubernetes secret that holds the slack token required to send messages.", "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + }, + "thread": { + "description": "Thread refers to additional options for sending messages to a Slack thread.", + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.SlackThread" } } }, "io.argoproj.sensor.v1alpha1.StandardK8STrigger": { "description": "StandardK8STrigger is the standard Kubernetes resource trigger", "type": "object", - "required": [ - "group", - "version", - "resource" - ], "properties": { - "group": { - "type": "string" - }, "liveObject": { "description": "LiveObject specifies whether the resource should be directly fetched from K8s instead of being marshaled from the resource artifact. If set to true, the resource artifact must contain the information required to uniquely identify the resource in the cluster, that is, you must specify \"apiVersion\", \"kind\" as well as \"name\" and \"namespace\" meta data. Only valid for operation type `update`", "type": "boolean" @@ -3166,15 +4392,9 @@ "description": "PatchStrategy controls the K8s object patching strategy when the trigger operation is specified as patch. possible values: \"application/json-patch+json\" \"application/merge-patch+json\" \"application/strategic-merge-patch+json\" \"application/apply-patch+yaml\". 
Defaults to \"application/merge-patch+json\"", "type": "string" }, - "resource": { - "type": "string" - }, "source": { - "description": "Source of the K8 resource file(s)", + "description": "Source of the K8s resource file(s)", "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.ArtifactLocation" - }, - "version": { - "type": "string" } } }, @@ -3283,6 +4503,10 @@ "description": "Trigger is an action taken, output produced, an event created, a message sent", "type": "object", "properties": { + "atLeastOnce": { + "description": "AtLeastOnce determines the trigger execution semantics. Defaults to false. Trigger execution will use at-most-once semantics. If set to true, Trigger execution will switch to at-least-once semantics.", + "type": "boolean" + }, "parameters": { "description": "Parameters is the list of parameters applied to the trigger template definition", "type": "array", @@ -3294,6 +4518,10 @@ "description": "Policy to configure backoff and execution criteria for the trigger", "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TriggerPolicy" }, + "rateLimit": { + "description": "Rate limit, default unit is Second", + "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.RateLimit" + }, "retryStrategy": { "description": "Retry strategy, defaults to no retry", "$ref": "#/definitions/io.argoproj.common.Backoff" @@ -3352,6 +4580,10 @@ "description": "DependencyName refers to the name of the dependency. The event which is stored for this dependency is used as payload for the parameterization. Make sure to refer to one of the dependencies you have defined under Dependencies list.", "type": "string" }, + "useRawData": { + "description": "UseRawData indicates if the value in an event at data key should be used without converting to string. When true, a number, boolean, json or string parameter may be extracted. When the field is unspecified, or explicitly false, the behavior is to turn the extracted field into a string. (e.g. when set to true, the parameter 123 will resolve to the numerical type, but when false, or not provided, the string \"123\" will be resolved)", + "type": "boolean" + }, "value": { "description": "Value is the default literal value to use for this parameter source This is only used if the DataKey is invalid. If the DataKey is invalid and this is not defined, this param source will produce an error.", "type": "string" @@ -3372,26 +4604,6 @@ } } }, - "io.argoproj.sensor.v1alpha1.TriggerSwitch": { - "description": "TriggerSwitch describes condition which must be satisfied in order to execute a trigger. Depending upon condition type, status of dependency groups is used to evaluate the result. 
Deprecated: will be removed in v1.5",
-      "type": "object",
-      "properties": {
-        "all": {
-          "description": "All acts as a AND operator between dependencies",
-          "type": "array",
-          "items": {
-            "type": "string"
-          }
-        },
-        "any": {
-          "description": "Any acts as a OR operator between dependencies",
-          "type": "array",
-          "items": {
-            "type": "string"
-          }
-        }
-      }
-    },
    "io.argoproj.sensor.v1alpha1.TriggerTemplate": {
      "description": "TriggerTemplate is the template that describes trigger specification.",
      "type": "object",
@@ -3411,14 +4623,29 @@
        "description": "AzureEventHubs refers to the trigger send an event to an Azure Event Hub.",
        "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.AzureEventHubsTrigger"
      },
+      "azureServiceBus": {
+        "description": "AzureServiceBus refers to the trigger designed to place messages on Azure Service Bus",
+        "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.AzureServiceBusTrigger"
+      },
      "conditions": {
        "description": "Conditions is the conditions to execute the trigger. For example: \"(dep01 || dep02) \u0026\u0026 dep04\"",
        "type": "string"
      },
+      "conditionsReset": {
+        "description": "Criteria to reset the conditions",
+        "type": "array",
+        "items": {
+          "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.ConditionsResetCriteria"
+        }
+      },
      "custom": {
        "description": "CustomTrigger refers to the trigger designed to connect to a gRPC trigger server and execute a custom trigger.",
        "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.CustomTrigger"
      },
+      "email": {
+        "description": "Email refers to the trigger designed to send an email notification",
+        "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.EmailTrigger"
+      },
      "http": {
        "description": "HTTP refers to the trigger designed to dispatch a HTTP request with on-the-fly constructable payload.",
        "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.HTTPTrigger"
@@ -3447,13 +4674,13 @@
        "description": "OpenWhisk refers to the trigger designed to invoke OpenWhisk action.",
        "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.OpenWhiskTrigger"
      },
+      "pulsar": {
+        "description": "Pulsar refers to the trigger designed to place messages on a Pulsar topic.",
+        "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.PulsarTrigger"
+      },
      "slack": {
        "description": "Slack refers to the trigger designed to send slack notification message.",
        "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.SlackTrigger"
-      },
-      "switch": {
-        "description": "DeprecatedSwitch is the condition to execute the trigger. Deprecated: will be removed in v1.5, use conditions instead",
-        "$ref": "#/definitions/io.argoproj.sensor.v1alpha1.TriggerSwitch"
      }
    }
  },
@@ -3474,3276 +4701,17539 @@
      }
    }
  },
-  "io.k8s.api.core.v1.Affinity": {
-    "description": "Affinity is a group of affinity scheduling rules.",
-    "type": "object",
-    "properties": {
-      "nodeAffinity": {
-        "description": "Node affinity is a group of node affinity scheduling rules.",
-        "type": "object",
-        "properties": {
-          "preferredDuringSchedulingIgnoredDuringExecution": {
-            "description": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.", - "type": "array", - "items": { - "description": "An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).", - "type": "object", - "required": [ - "weight", - "preference" - ], - "properties": { - "preference": { - "description": "A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.", - "type": "object", - "properties": { - "matchExpressions": { - "description": "A list of node selector requirements by node's labels.", - "type": "array", - "items": { - "description": "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", - "type": "object", - "required": [ - "key", - "operator" - ], - "properties": { - "key": { - "description": "The label key that the selector applies to.", - "type": "string" - }, - "operator": { - "description": "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.", - "type": "string" - }, - "values": { - "description": "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.", - "type": "array", - "items": { - "type": "string" - } - } - } - } - }, - "matchFields": { - "description": "A list of node selector requirements by node's fields.", - "type": "array", - "items": { - "description": "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", - "type": "object", - "required": [ - "key", - "operator" - ], - "properties": { - "key": { - "description": "The label key that the selector applies to.", - "type": "string" - }, - "operator": { - "description": "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.", - "type": "string" - }, - "values": { - "description": "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. 
This array is replaced during a strategic merge patch.", - "type": "array", - "items": { - "type": "string" - } - } - } - } - } - } - }, - "weight": { - "description": "Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.", - "type": "integer", - "format": "int32" - } - } - } - }, - "requiredDuringSchedulingIgnoredDuringExecution": { - "description": "A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.", - "type": "object", - "required": [ - "nodeSelectorTerms" - ], - "properties": { - "nodeSelectorTerms": { - "description": "Required. A list of node selector terms. The terms are ORed.", - "type": "array", - "items": { - "description": "A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.", - "type": "object", - "properties": { - "matchExpressions": { - "description": "A list of node selector requirements by node's labels.", - "type": "array", - "items": { - "description": "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", - "type": "object", - "required": [ - "key", - "operator" - ], - "properties": { - "key": { - "description": "The label key that the selector applies to.", - "type": "string" - }, - "operator": { - "description": "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.", - "type": "string" - }, - "values": { - "description": "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.", - "type": "array", - "items": { - "type": "string" - } - } - } - } - }, - "matchFields": { - "description": "A list of node selector requirements by node's fields.", - "type": "array", - "items": { - "description": "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", - "type": "object", - "required": [ - "key", - "operator" - ], - "properties": { - "key": { - "description": "The label key that the selector applies to.", - "type": "string" - }, - "operator": { - "description": "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.", - "type": "string" - }, - "values": { - "description": "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. 
This array is replaced during a strategic merge patch.", - "type": "array", - "items": { - "type": "string" - } - } - } - } - } - } - } - } - } - } - } - }, - "podAffinity": { - "description": "Pod affinity is a group of inter pod affinity scheduling rules.", - "type": "object", - "properties": { - "preferredDuringSchedulingIgnoredDuringExecution": { - "description": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", - "type": "array", - "items": { - "description": "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)", - "type": "object", - "required": [ - "weight", - "podAffinityTerm" - ], - "properties": { - "podAffinityTerm": { - "description": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key \u003ctopologyKey\u003e matches that of any node on which a pod of the set of pods is running", - "type": "object", - "required": [ - "topologyKey" - ], - "properties": { - "labelSelector": { - "description": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", - "type": "object", - "properties": { - "matchExpressions": { - "description": "matchExpressions is a list of label selector requirements. The requirements are ANDed.", - "type": "array", - "items": { - "description": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", - "type": "object", - "required": [ - "key", - "operator" - ], - "properties": { - "key": { - "description": "key is the label key that the selector applies to.", - "type": "string", - "x-kubernetes-patch-merge-key": "key", - "x-kubernetes-patch-strategy": "merge" - }, - "operator": { - "description": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.", - "type": "string" - }, - "values": { - "description": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", - "type": "array", - "items": { - "type": "string" - } - } - } - } - }, - "matchLabels": { - "description": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". 
The requirements are ANDed.", - "type": "object", - "additionalProperties": { - "type": "string" - } - } - } - }, - "namespaces": { - "description": "namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means \"this pod's namespace\"", - "type": "array", - "items": { - "type": "string" - } - }, - "topologyKey": { - "description": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.", - "type": "string" - } - } - }, - "weight": { - "description": "weight associated with matching the corresponding podAffinityTerm, in the range 1-100.", - "type": "integer", - "format": "int32" - } - } - } - }, - "requiredDuringSchedulingIgnoredDuringExecution": { - "description": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", - "type": "array", - "items": { - "description": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key \u003ctopologyKey\u003e matches that of any node on which a pod of the set of pods is running", - "type": "object", - "required": [ - "topologyKey" - ], - "properties": { - "labelSelector": { - "description": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", - "type": "object", - "properties": { - "matchExpressions": { - "description": "matchExpressions is a list of label selector requirements. The requirements are ANDed.", - "type": "array", - "items": { - "description": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", - "type": "object", - "required": [ - "key", - "operator" - ], - "properties": { - "key": { - "description": "key is the label key that the selector applies to.", - "type": "string", - "x-kubernetes-patch-merge-key": "key", - "x-kubernetes-patch-strategy": "merge" - }, - "operator": { - "description": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.", - "type": "string" - }, - "values": { - "description": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", - "type": "array", - "items": { - "type": "string" - } - } - } - } - }, - "matchLabels": { - "description": "matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.", - "type": "object", - "additionalProperties": { - "type": "string" - } - } - } - }, - "namespaces": { - "description": "namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means \"this pod's namespace\"", - "type": "array", - "items": { - "type": "string" - } - }, - "topologyKey": { - "description": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.", - "type": "string" - } - } - } - } - } - }, - "podAntiAffinity": { - "description": "Pod anti affinity is a group of inter pod anti affinity scheduling rules.", - "type": "object", - "properties": { - "preferredDuringSchedulingIgnoredDuringExecution": { - "description": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", - "type": "array", - "items": { - "description": "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)", - "type": "object", - "required": [ - "weight", - "podAffinityTerm" - ], - "properties": { - "podAffinityTerm": { - "description": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key \u003ctopologyKey\u003e matches that of any node on which a pod of the set of pods is running", - "type": "object", - "required": [ - "topologyKey" - ], - "properties": { - "labelSelector": { - "description": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", - "type": "object", - "properties": { - "matchExpressions": { - "description": "matchExpressions is a list of label selector requirements. The requirements are ANDed.", - "type": "array", - "items": { - "description": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", - "type": "object", - "required": [ - "key", - "operator" - ], - "properties": { - "key": { - "description": "key is the label key that the selector applies to.", - "type": "string", - "x-kubernetes-patch-merge-key": "key", - "x-kubernetes-patch-strategy": "merge" - }, - "operator": { - "description": "operator represents a key's relationship to a set of values. 
Valid operators are In, NotIn, Exists and DoesNotExist.", - "type": "string" - }, - "values": { - "description": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", - "type": "array", - "items": { - "type": "string" - } - } - } - } - }, - "matchLabels": { - "description": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.", - "type": "object", - "additionalProperties": { - "type": "string" - } - } - } - }, - "namespaces": { - "description": "namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means \"this pod's namespace\"", - "type": "array", - "items": { - "type": "string" - } - }, - "topologyKey": { - "description": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.", - "type": "string" - } - } - }, - "weight": { - "description": "weight associated with matching the corresponding podAffinityTerm, in the range 1-100.", - "type": "integer", - "format": "int32" - } - } - } - }, - "requiredDuringSchedulingIgnoredDuringExecution": { - "description": "If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", - "type": "array", - "items": { - "description": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key \u003ctopologyKey\u003e matches that of any node on which a pod of the set of pods is running", - "type": "object", - "required": [ - "topologyKey" - ], - "properties": { - "labelSelector": { - "description": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", - "type": "object", - "properties": { - "matchExpressions": { - "description": "matchExpressions is a list of label selector requirements. 
The requirements are ANDed.", - "type": "array", - "items": { - "description": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", - "type": "object", - "required": [ - "key", - "operator" - ], - "properties": { - "key": { - "description": "key is the label key that the selector applies to.", - "type": "string", - "x-kubernetes-patch-merge-key": "key", - "x-kubernetes-patch-strategy": "merge" - }, - "operator": { - "description": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.", - "type": "string" - }, - "values": { - "description": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", - "type": "array", - "items": { - "type": "string" - } - } - } - } - }, - "matchLabels": { - "description": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.", - "type": "object", - "additionalProperties": { - "type": "string" - } - } - } - }, - "namespaces": { - "description": "namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means \"this pod's namespace\"", - "type": "array", - "items": { - "type": "string" - } - }, - "topologyKey": { - "description": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.", - "type": "string" - } - } - } - } - } - } - } - }, - "io.k8s.api.core.v1.ConfigMapKeySelector": { - "description": "Selects a key from a ConfigMap.", + "io.k8s.api.admissionregistration.v1.MatchCondition": { + "description": "MatchCondition represents a condition which must by fulfilled for a request to be sent to a webhook.", "type": "object", "required": [ - "key" + "name", + "expression" ], "properties": { - "key": { - "description": "The key to select.", + "expression": { + "description": "Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables:\n\n'object' - The object from the incoming request. The value is null for DELETE requests. 'oldObject' - The existing object. The value is null for CREATE requests. 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\nDocumentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/\n\nRequired.", "type": "string" }, "name": { - "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "description": "Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')\n\nRequired.", "type": "string" - }, - "optional": { - "description": "Specify whether the ConfigMap or its key must be defined", - "type": "boolean" } } }, - "io.k8s.api.core.v1.Container": { - "description": "A single application container that you want to run within a pod.", + "io.k8s.api.admissionregistration.v1.MutatingWebhook": { + "description": "MutatingWebhook describes an admission webhook and the resources and operations it applies to.", "type": "object", "required": [ - "name" + "name", + "clientConfig", + "sideEffects", + "admissionReviewVersions" ], "properties": { - "args": { - "description": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "admissionReviewVersions": { + "description": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy.", "type": "array", "items": { "type": "string" } }, - "command": { - "description": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", - "type": "array", - "items": { - "type": "string" - } + "clientConfig": { + "description": "ClientConfig defines how to communicate with the hook. Required", + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.WebhookClientConfig" }, - "env": { - "description": "List of environment variables to set in the container. 
Cannot be updated.", + "failurePolicy": { + "description": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Fail.", + "type": "string" + }, + "matchConditions": { + "description": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.", "type": "array", "items": { - "description": "EnvVar represents an environment variable present in a Container.", - "type": "object", - "required": [ - "name" - ], - "properties": { - "name": { - "description": "Name of the environment variable. Must be a C_IDENTIFIER.", - "type": "string" - }, - "value": { - "description": "Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".", - "type": "string" - }, - "valueFrom": { - "description": "EnvVarSource represents a source for the value of an EnvVar.", - "type": "object", - "properties": { - "configMapKeyRef": { - "description": "Selects a key from a ConfigMap.", - "type": "object", - "required": [ - "key" - ], - "properties": { - "key": { - "description": "The key to select.", - "type": "string" - }, - "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", - "type": "string" - }, - "optional": { - "description": "Specify whether the ConfigMap or its key must be defined", - "type": "boolean" - } - } - }, - "fieldRef": { - "description": "ObjectFieldSelector selects an APIVersioned field of an object.", - "type": "object", - "required": [ - "fieldPath" - ], - "properties": { - "apiVersion": { - "description": "Version of the schema the FieldPath is written in terms of, defaults to \"v1\".", - "type": "string" - }, - "fieldPath": { - "description": "Path of the field to select in the specified API version.", - "type": "string" - } - } - }, - "resourceFieldRef": { - "description": "ResourceFieldSelector represents container resources (cpu, memory) and their output format", - "type": "object", - "required": [ - "resource" - ], - "properties": { - "containerName": { - "description": "Container name: required for volumes, optional for env vars", - "type": "string" - }, - "divisor": { - "description": "Quantity is a fixed-point representation of a number. 
It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n (Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \"+\" | \"-\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n a. No precision is lost\n b. No fractional digits will be emitted\n c. The exponent (or suffix) is as large as possible.\nThe sign will be omitted unless the number is negative.\n\nExamples:\n 1.5 will be serialized as \"1500m\"\n 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", - "type": "string" - }, - "resource": { - "description": "Required: resource to select", - "type": "string" - } - } - }, - "secretKeyRef": { - "description": "SecretKeySelector selects a key of a Secret.", - "type": "object", - "required": [ - "key" - ], - "properties": { - "key": { - "description": "The key of the secret to select from. Must be a valid secret key.", - "type": "string" - }, - "name": { - "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", - "type": "string" - }, - "optional": { - "description": "Specify whether the Secret or its key must be defined", - "type": "boolean" - } - } - } - } - } - } + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.MatchCondition" }, + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge" }, - "envFrom": { - "description": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", - "type": "array", - "items": { - "description": "EnvFromSource represents the source of a set of ConfigMaps", - "type": "object", - "properties": { - "configMapRef": { - "description": "ConfigMapEnvSource selects a ConfigMap to populate the environment variables with.\n\nThe contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.", - "type": "object", - "properties": { - "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", - "type": "string" - }, - "optional": { - "description": "Specify whether the ConfigMap must be defined", - "type": "boolean" - } - } - }, - "prefix": { - "description": "An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.", - "type": "string" - }, - "secretRef": { - "description": "SecretEnvSource selects a Secret to populate the environment variables with.\n\nThe contents of the target Secret's Data field will represent the key-value pairs as environment variables.", - "type": "object", - "properties": { - "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", - "type": "string" - }, - "optional": { - "description": "Specify whether the Secret must be defined", - "type": "boolean" - } - } - } - } - } - }, - "image": { - "description": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", - "type": "string" - }, - "imagePullPolicy": { - "description": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + "matchPolicy": { + "description": "matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. 
For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\n\nDefaults to \"Equivalent\"", "type": "string" }, - "lifecycle": { - "description": "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.", - "type": "object", - "properties": { - "postStart": { - "description": "Handler defines a specific action that should be taken", - "type": "object", - "properties": { - "exec": { - "description": "ExecAction describes a \"run in container\" action.", - "type": "object", - "properties": { - "command": { - "description": "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.", - "type": "array", - "items": { - "type": "string" - } - } - } - }, - "httpGet": { - "description": "HTTPGetAction describes an action based on HTTP Get requests.", - "type": "object", - "required": [ - "port" - ], - "properties": { - "host": { - "description": "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead.", - "type": "string" - }, - "httpHeaders": { - "description": "Custom headers to set in the request. HTTP allows repeated headers.", - "type": "array", - "items": { - "description": "HTTPHeader describes a custom header to be used in HTTP probes", - "type": "object", - "required": [ - "name", - "value" - ], - "properties": { - "name": { - "description": "The header field name", - "type": "string" - }, - "value": { - "description": "The header field value", - "type": "string" - } - } - } - }, - "path": { - "description": "Path to access on the HTTP server.", - "type": "string" - }, - "port": { - "description": "IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.", - "type": "string", - "format": "int-or-string" - }, - "scheme": { - "description": "Scheme to use for connecting to the host. Defaults to HTTP.", - "type": "string" - } - } - }, - "tcpSocket": { - "description": "TCPSocketAction describes an action based on opening a socket", - "type": "object", - "required": [ - "port" - ], - "properties": { - "host": { - "description": "Optional: Host name to connect to, defaults to the pod IP.", - "type": "string" - }, - "port": { - "description": "IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. 
This allows you to have, for example, a JSON field that can accept a name or number.", - "type": "string", - "format": "int-or-string" - } - } - } - } - }, - "preStop": { - "description": "Handler defines a specific action that should be taken", - "type": "object", - "properties": { - "exec": { - "description": "ExecAction describes a \"run in container\" action.", - "type": "object", - "properties": { - "command": { - "description": "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.", - "type": "array", - "items": { - "type": "string" - } - } - } - }, - "httpGet": { - "description": "HTTPGetAction describes an action based on HTTP Get requests.", - "type": "object", - "required": [ - "port" - ], - "properties": { - "host": { - "description": "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead.", - "type": "string" - }, - "httpHeaders": { - "description": "Custom headers to set in the request. HTTP allows repeated headers.", - "type": "array", - "items": { - "description": "HTTPHeader describes a custom header to be used in HTTP probes", - "type": "object", - "required": [ - "name", - "value" - ], - "properties": { - "name": { - "description": "The header field name", - "type": "string" - }, - "value": { - "description": "The header field value", - "type": "string" - } - } - } - }, - "path": { - "description": "Path to access on the HTTP server.", - "type": "string" - }, - "port": { - "description": "IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.", - "type": "string", - "format": "int-or-string" - }, - "scheme": { - "description": "Scheme to use for connecting to the host. Defaults to HTTP.", - "type": "string" - } - } - }, - "tcpSocket": { - "description": "TCPSocketAction describes an action based on opening a socket", - "type": "object", - "required": [ - "port" - ], - "properties": { - "host": { - "description": "Optional: Host name to connect to, defaults to the pod IP.", - "type": "string" - }, - "port": { - "description": "IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.", - "type": "string", - "format": "int-or-string" - } - } - } - } - } - } - }, - "livenessProbe": { - "description": "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.", - "type": "object", - "properties": { - "exec": { - "description": "ExecAction describes a \"run in container\" action.", - "type": "object", - "properties": { - "command": { - "description": "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. 
To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.", - "type": "array", - "items": { - "type": "string" - } - } - } - }, - "failureThreshold": { - "description": "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.", - "type": "integer", - "format": "int32" - }, - "httpGet": { - "description": "HTTPGetAction describes an action based on HTTP Get requests.", - "type": "object", - "required": [ - "port" - ], - "properties": { - "host": { - "description": "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead.", - "type": "string" - }, - "httpHeaders": { - "description": "Custom headers to set in the request. HTTP allows repeated headers.", - "type": "array", - "items": { - "description": "HTTPHeader describes a custom header to be used in HTTP probes", - "type": "object", - "required": [ - "name", - "value" - ], - "properties": { - "name": { - "description": "The header field name", - "type": "string" - }, - "value": { - "description": "The header field value", - "type": "string" - } - } - } - }, - "path": { - "description": "Path to access on the HTTP server.", - "type": "string" - }, - "port": { - "description": "IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.", - "type": "string", - "format": "int-or-string" - }, - "scheme": { - "description": "Scheme to use for connecting to the host. Defaults to HTTP.", - "type": "string" - } - } - }, - "initialDelaySeconds": { - "description": "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - "type": "integer", - "format": "int32" - }, - "periodSeconds": { - "description": "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.", - "type": "integer", - "format": "int32" - }, - "successThreshold": { - "description": "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.", - "type": "integer", - "format": "int32" - }, - "tcpSocket": { - "description": "TCPSocketAction describes an action based on opening a socket", - "type": "object", - "required": [ - "port" - ], - "properties": { - "host": { - "description": "Optional: Host name to connect to, defaults to the pod IP.", - "type": "string" - }, - "port": { - "description": "IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.", - "type": "string", - "format": "int-or-string" - } - } - }, - "timeoutSeconds": { - "description": "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - "type": "integer", - "format": "int32" - } - } - }, "name": { - "description": "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). 
Cannot be updated.", + "description": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", "type": "string" }, - "ports": { - "description": "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", - "type": "array", - "items": { - "description": "ContainerPort represents a network port in a single container.", - "type": "object", - "required": [ - "containerPort" - ], - "properties": { - "containerPort": { - "description": "Number of port to expose on the pod's IP address. This must be a valid port number, 0 \u003c x \u003c 65536.", - "type": "integer", - "format": "int32" - }, - "hostIP": { - "description": "What host IP to bind the external port to.", - "type": "string" - }, - "hostPort": { - "description": "Number of port to expose on the host. If specified, this must be a valid port number, 0 \u003c x \u003c 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.", - "type": "integer", - "format": "int32" - }, - "name": { - "description": "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.", - "type": "string" - }, - "protocol": { - "description": "Protocol for port. Must be UDP, TCP, or SCTP. Defaults to \"TCP\".", - "type": "string" - } - } - }, - "x-kubernetes-list-map-keys": [ - "containerPort", - "protocol" - ], - "x-kubernetes-list-type": "map", - "x-kubernetes-patch-merge-key": "containerPort", - "x-kubernetes-patch-strategy": "merge" - }, - "readinessProbe": { - "description": "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.", - "type": "object", - "properties": { - "exec": { - "description": "ExecAction describes a \"run in container\" action.", - "type": "object", - "properties": { - "command": { - "description": "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.", - "type": "array", - "items": { - "type": "string" - } - } - } - }, - "failureThreshold": { - "description": "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.", - "type": "integer", - "format": "int32" - }, - "httpGet": { - "description": "HTTPGetAction describes an action based on HTTP Get requests.", - "type": "object", - "required": [ - "port" - ], - "properties": { - "host": { - "description": "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead.", - "type": "string" - }, - "httpHeaders": { - "description": "Custom headers to set in the request. 
HTTP allows repeated headers.", - "type": "array", - "items": { - "description": "HTTPHeader describes a custom header to be used in HTTP probes", - "type": "object", - "required": [ - "name", - "value" - ], - "properties": { - "name": { - "description": "The header field name", - "type": "string" - }, - "value": { - "description": "The header field value", - "type": "string" - } - } - } - }, - "path": { - "description": "Path to access on the HTTP server.", - "type": "string" - }, - "port": { - "description": "IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.", - "type": "string", - "format": "int-or-string" - }, - "scheme": { - "description": "Scheme to use for connecting to the host. Defaults to HTTP.", - "type": "string" - } - } - }, - "initialDelaySeconds": { - "description": "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - "type": "integer", - "format": "int32" - }, - "periodSeconds": { - "description": "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.", - "type": "integer", - "format": "int32" - }, - "successThreshold": { - "description": "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.", - "type": "integer", - "format": "int32" - }, - "tcpSocket": { - "description": "TCPSocketAction describes an action based on opening a socket", - "type": "object", - "required": [ - "port" - ], - "properties": { - "host": { - "description": "Optional: Host name to connect to, defaults to the pod IP.", - "type": "string" - }, - "port": { - "description": "IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.", - "type": "string", - "format": "int-or-string" - } - } - }, - "timeoutSeconds": { - "description": "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - "type": "integer", - "format": "int32" - } - } + "namespaceSelector": { + "description": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. 
If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" }, - "resources": { - "description": "ResourceRequirements describes the compute resource requirements.", - "type": "object", - "properties": { - "limits": { - "description": "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", - "type": "object", - "additionalProperties": { - "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n (Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \"+\" | \"-\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n a. No precision is lost\n b. No fractional digits will be emitted\n c. 
The exponent (or suffix) is as large as possible.\nThe sign will be omitted unless the number is negative.\n\nExamples:\n 1.5 will be serialized as \"1500m\"\n 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", - "type": "string" - } - }, - "requests": { - "description": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", - "type": "object", - "additionalProperties": { - "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n (Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \"+\" | \"-\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n a. No precision is lost\n b. No fractional digits will be emitted\n c. The exponent (or suffix) is as large as possible.\nThe sign will be omitted unless the number is negative.\n\nExamples:\n 1.5 will be serialized as \"1500m\"\n 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", - "type": "string" - } - } - } + "objectSelector": { + "description": "ObjectSelector decides whether to run the webhook based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the webhook, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" }, - "securityContext": { - "description": "SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.", - "type": "object", - "properties": { - "allowPrivilegeEscalation": { - "description": "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN", - "type": "boolean" - }, - "capabilities": { - "description": "Adds and removes POSIX capabilities from running containers.", - "type": "object", - "properties": { - "add": { - "description": "Added capabilities", - "type": "array", - "items": { - "type": "string" - } - }, - "drop": { - "description": "Removed capabilities", - "type": "array", - "items": { - "type": "string" - } - } - } - }, - "privileged": { - "description": "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.", - "type": "boolean" - }, - "procMount": { - "description": "procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled.", - "type": "string" - }, - "readOnlyRootFilesystem": { - "description": "Whether this container has a read-only root filesystem. Default is false.", - "type": "boolean" - }, - "runAsGroup": { - "description": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "type": "integer", - "format": "int64" - }, - "runAsNonRoot": { - "description": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. 
May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "type": "boolean" - }, - "runAsUser": { - "description": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "type": "integer", - "format": "int64" - }, - "seLinuxOptions": { - "description": "SELinuxOptions are the labels to be applied to the container", - "type": "object", - "properties": { - "level": { - "description": "Level is SELinux level label that applies to the container.", - "type": "string" - }, - "role": { - "description": "Role is a SELinux role label that applies to the container.", - "type": "string" - }, - "type": { - "description": "Type is a SELinux type label that applies to the container.", - "type": "string" - }, - "user": { - "description": "User is a SELinux user label that applies to the container.", - "type": "string" - } - } - }, - "seccompProfile": { - "description": "SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.", - "type": "object", - "required": [ - "type" - ], - "properties": { - "localhostProfile": { - "description": "localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \"Localhost\".", - "type": "string" - }, - "type": { - "description": "type indicates which kind of seccomp profile will be applied. Valid options are:\n\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied.", - "type": "string" - } - }, - "x-kubernetes-unions": [ - { - "discriminator": "type", - "fields-to-discriminateBy": { - "localhostProfile": "LocalhostProfile" - } - } - ] - }, - "windowsOptions": { - "description": "WindowsSecurityContextOptions contain Windows-specific options and credentials.", - "type": "object", - "properties": { - "gmsaCredentialSpec": { - "description": "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.", - "type": "string" - }, - "gmsaCredentialSpecName": { - "description": "GMSACredentialSpecName is the name of the GMSA credential spec to use.", - "type": "string" - }, - "runAsUserName": { - "description": "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "type": "string" - } - } - } - } + "reinvocationPolicy": { + "description": "reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. 
Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: the webhook will not be called more than once in a single admission evaluation.\n\nIfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.\n\nDefaults to \"Never\".", + "type": "string" }, - "startupProbe": { - "description": "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.", - "type": "object", - "properties": { - "exec": { - "description": "ExecAction describes a \"run in container\" action.", - "type": "object", - "properties": { - "command": { - "description": "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.", - "type": "array", - "items": { - "type": "string" - } - } - } - }, - "failureThreshold": { - "description": "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.", - "type": "integer", - "format": "int32" - }, - "httpGet": { - "description": "HTTPGetAction describes an action based on HTTP Get requests.", - "type": "object", - "required": [ - "port" - ], - "properties": { - "host": { - "description": "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead.", - "type": "string" - }, - "httpHeaders": { - "description": "Custom headers to set in the request. HTTP allows repeated headers.", - "type": "array", - "items": { - "description": "HTTPHeader describes a custom header to be used in HTTP probes", - "type": "object", - "required": [ - "name", - "value" - ], - "properties": { - "name": { - "description": "The header field name", - "type": "string" - }, - "value": { - "description": "The header field value", - "type": "string" - } - } - } - }, - "path": { - "description": "Path to access on the HTTP server.", - "type": "string" - }, - "port": { - "description": "IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.", - "type": "string", - "format": "int-or-string" - }, - "scheme": { - "description": "Scheme to use for connecting to the host. Defaults to HTTP.", - "type": "string" - } - } - }, - "initialDelaySeconds": { - "description": "Number of seconds after the container has started before liveness probes are initiated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - "type": "integer", - "format": "int32" - }, - "periodSeconds": { - "description": "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.", - "type": "integer", - "format": "int32" - }, - "successThreshold": { - "description": "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.", - "type": "integer", - "format": "int32" - }, - "tcpSocket": { - "description": "TCPSocketAction describes an action based on opening a socket", - "type": "object", - "required": [ - "port" - ], - "properties": { - "host": { - "description": "Optional: Host name to connect to, defaults to the pod IP.", - "type": "string" - }, - "port": { - "description": "IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.", - "type": "string", - "format": "int-or-string" - } - } - }, - "timeoutSeconds": { - "description": "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - "type": "integer", - "format": "int32" - } + "rules": { + "description": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.RuleWithOperations" } }, - "stdin": { - "description": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", - "type": "boolean" - }, - "stdinOnce": { - "description": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", - "type": "boolean" + "sideEffects": { + "description": "SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. 
Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some.", + "type": "string" }, - "terminationMessagePath": { - "description": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", + "timeoutSeconds": { + "description": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 10 seconds.", + "type": "integer", + "format": "int32" + } + } + }, + "io.k8s.api.admissionregistration.v1.MutatingWebhookConfiguration": { + "description": "MutatingWebhookConfiguration describes the configuration of an admission webhook that accepts or rejects and may change the object.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", "type": "string" }, - "terminationMessagePolicy": { - "description": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "type": "string" }, - "tty": { - "description": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
Default is false.", - "type": "boolean" + "metadata": { + "description": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, - "volumeDevices": { - "description": "volumeDevices is the list of block devices to be used by the container.", + "webhooks": { + "description": "Webhooks is a list of webhooks and the affected resources and operations.", "type": "array", "items": { - "description": "volumeDevice describes a mapping of a raw block device within a container.", - "type": "object", - "required": [ - "name", - "devicePath" - ], - "properties": { - "devicePath": { - "description": "devicePath is the path inside of the container that the device will be mapped to.", - "type": "string" - }, - "name": { - "description": "name must match the name of a persistentVolumeClaim in the pod", - "type": "string" - } - } + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.MutatingWebhook" }, - "x-kubernetes-patch-merge-key": "devicePath", + "x-kubernetes-patch-merge-key": "name", "x-kubernetes-patch-strategy": "merge" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "admissionregistration.k8s.io", + "kind": "MutatingWebhookConfiguration", + "version": "v1" + } + ] + }, + "io.k8s.api.admissionregistration.v1.MutatingWebhookConfigurationList": { + "description": "MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" }, - "volumeMounts": { - "description": "Pod volumes to mount into the container's filesystem. Cannot be updated.", + "items": { + "description": "List of MutatingWebhookConfiguration.", "type": "array", "items": { - "description": "VolumeMount describes a mounting of a Volume within a container.", - "type": "object", - "required": [ - "name", - "mountPath" - ], - "properties": { - "mountPath": { - "description": "Path within the container at which the volume should be mounted. Must not contain ':'.", - "type": "string" - }, - "mountPropagation": { - "description": "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.", - "type": "string" - }, - "name": { - "description": "This must match the Name of a Volume.", - "type": "string" - }, - "readOnly": { - "description": "Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.", - "type": "boolean" - }, - "subPath": { - "description": "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).", - "type": "string" - }, - "subPathExpr": { - "description": "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). 
SubPathExpr and SubPath are mutually exclusive.", - "type": "string" - } - } - }, - "x-kubernetes-patch-merge-key": "mountPath", - "x-kubernetes-patch-strategy": "merge" + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.MutatingWebhookConfiguration" + } }, - "workingDir": { - "description": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "type": "string" + }, + "metadata": { + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" } - } - }, - "io.k8s.api.core.v1.LocalObjectReference": { - "description": "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.", - "type": "object", - "properties": { - "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", - "type": "string" + }, + "x-kubernetes-group-version-kind": [ + { + "group": "admissionregistration.k8s.io", + "kind": "MutatingWebhookConfigurationList", + "version": "v1" } - } + ] }, - "io.k8s.api.core.v1.PodSecurityContext": { - "description": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.", + "io.k8s.api.admissionregistration.v1.RuleWithOperations": { + "description": "RuleWithOperations is a tuple of Operations and Resources. It is recommended to make sure that all the tuple expansions are valid.", "type": "object", "properties": { - "fsGroup": { - "description": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\n\nIf unset, the Kubelet will not modify the ownership and permissions of any volume.", - "type": "integer", - "format": "int64" + "apiGroups": { + "description": "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "atomic" }, - "fsGroupChangePolicy": { - "description": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified defaults to \"Always\".", - "type": "string" + "apiVersions": { + "description": "APIVersions is the API versions the resources belong to. '*' is all versions. 
If '*' is present, the length of the slice must be one. Required.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "atomic" }, - "runAsGroup": { - "description": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", - "type": "integer", - "format": "int64" - }, - "runAsNonRoot": { - "description": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "type": "boolean" - }, - "runAsUser": { - "description": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", - "type": "integer", - "format": "int64" - }, - "seLinuxOptions": { - "description": "SELinuxOptions are the labels to be applied to the container", - "type": "object", - "properties": { - "level": { - "description": "Level is SELinux level label that applies to the container.", - "type": "string" - }, - "role": { - "description": "Role is a SELinux role label that applies to the container.", - "type": "string" - }, - "type": { - "description": "Type is a SELinux type label that applies to the container.", - "type": "string" - }, - "user": { - "description": "User is a SELinux user label that applies to the container.", - "type": "string" - } - } - }, - "seccompProfile": { - "description": "SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.", - "type": "object", - "required": [ - "type" - ], - "properties": { - "localhostProfile": { - "description": "localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must only be set if type is \"Localhost\".", - "type": "string" - }, - "type": { - "description": "type indicates which kind of seccomp profile will be applied. Valid options are:\n\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied.", - "type": "string" - } + "operations": { + "description": "Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * for all of those operations and any future admission operations that are added. If '*' is present, the length of the slice must be one. Required.", + "type": "array", + "items": { + "type": "string" }, - "x-kubernetes-unions": [ - { - "discriminator": "type", - "fields-to-discriminateBy": { - "localhostProfile": "LocalhostProfile" - } - } - ] + "x-kubernetes-list-type": "atomic" }, - "supplementalGroups": { - "description": "A list of groups applied to the first process run in each container, in addition to the container's primary GID. 
If unspecified, no groups will be added to any container.", + "resources": { + "description": "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.", "type": "array", "items": { - "type": "integer", - "format": "int64" - } - }, - "sysctls": { - "description": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch.", - "type": "array", - "items": { - "description": "Sysctl defines a kernel parameter to be set", - "type": "object", - "required": [ - "name", - "value" - ], - "properties": { - "name": { - "description": "Name of a property to set", - "type": "string" - }, - "value": { - "description": "Value of a property to set", - "type": "string" - } - } - } + "type": "string" + }, + "x-kubernetes-list-type": "atomic" }, - "windowsOptions": { - "description": "WindowsSecurityContextOptions contain Windows-specific options and credentials.", - "type": "object", - "properties": { - "gmsaCredentialSpec": { - "description": "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.", - "type": "string" - }, - "gmsaCredentialSpecName": { - "description": "GMSACredentialSpecName is the name of the GMSA credential spec to use.", - "type": "string" - }, - "runAsUserName": { - "description": "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "type": "string" - } - } + "scope": { + "description": "scope specifies the scope of this rule. Valid values are \"Cluster\", \"Namespaced\", and \"*\" \"Cluster\" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. \"Namespaced\" means that only namespaced resources will match this rule. \"*\" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is \"*\".", + "type": "string" } } }, - "io.k8s.api.core.v1.ResourceRequirements": { - "description": "ResourceRequirements describes the compute resource requirements.", + "io.k8s.api.admissionregistration.v1.ServiceReference": { + "description": "ServiceReference holds a reference to Service.legacy.k8s.io", "type": "object", + "required": [ + "namespace", + "name" + ], "properties": { - "limits": { - "description": "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", - "type": "object", - "additionalProperties": { - "description": "Quantity is a fixed-point representation of a number. 
It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n (Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \"+\" | \"-\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n a. No precision is lost\n b. No fractional digits will be emitted\n c. The exponent (or suffix) is as large as possible.\nThe sign will be omitted unless the number is negative.\n\nExamples:\n 1.5 will be serialized as \"1500m\"\n 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", - "type": "string" - } + "name": { + "description": "`name` is the name of the service. Required", + "type": "string" }, - "requests": { - "description": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", - "type": "object", - "additionalProperties": { - "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n (Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\u003cdigit\u003e ::= 0 | 1 | ... 
| 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \"+\" | \"-\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n a. No precision is lost\n b. No fractional digits will be emitted\n c. The exponent (or suffix) is as large as possible.\nThe sign will be omitted unless the number is negative.\n\nExamples:\n 1.5 will be serialized as \"1500m\"\n 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", - "type": "string" - } - } } }, "name": { "description": "`name` is the name of the service. Required", "type": "string" }, "namespace": { "description": "`namespace` is the namespace of the service. Required", "type": "string" }, "path": { "description": "`path` is an optional URL path which will be sent in any request to this service.", "type": "string" }, "port": { "description": "If specified, the port on the service that is hosting the webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive).", "type": "integer", "format": "int32" } } }, - "io.k8s.api.core.v1.SecretKeySelector": { - "description": "SecretKeySelector selects a key of a Secret.", + "io.k8s.api.admissionregistration.v1.ValidatingWebhook": { + "description": "ValidatingWebhook describes an admission webhook and the resources and operations it applies to.", "type": "object", "required": [ - "key" + "name", + "clientConfig", + "sideEffects", + "admissionReviewVersions" ], "properties": { + "admissionReviewVersions": { + "description": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use the first version in the list which it supports.
If none of the versions specified in this list are supported by the API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy.", + "type": "array", + "items": { + "type": "string" + } + }, + "clientConfig": { + "description": "ClientConfig defines how to communicate with the hook. Required", + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.WebhookClientConfig" + }, + "failurePolicy": { + "description": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Fail.", + "type": "string" + }, + "matchConditions": { + "description": "MatchConditions is a list of conditions that must be met for a request to be sent to this webhook. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the webhook is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the webhook is called.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the error is ignored and the webhook is skipped\n\nThis is a beta feature and managed by the AdmissionWebhookMatchConditions feature gate.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.MatchCondition" + }, + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "matchPolicy": { + "description": "matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\n\n- Equivalent: match a request if it modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\n\nDefaults to \"Equivalent\"", "type": "string" }, "name": { "description": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", "type": "string" }, "namespaceSelector": { "description": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector.
If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + }, + "objectSelector": { + "description": "ObjectSelector decides whether to run the webhook based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the webhook, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + }, + "rules": { + "description": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.RuleWithOperations" + } + }, + "sideEffects": { + "description": "SideEffects states whether this webhook has side effects. Acceptable values are: None, NoneOnDryRun (webhooks created via v1beta1 may also specify Some or Unknown). Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some.", + "type": "string" + }, + "timeoutSeconds": { + "description": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. 
Default to 10 seconds.", + "type": "integer", + "format": "int32" + } + } + }, - "io.k8s.api.core.v1.ServicePort": { - "description": "ServicePort contains information on service's port.", + "io.k8s.api.admissionregistration.v1.ValidatingWebhookConfiguration": { + "description": "ValidatingWebhookConfiguration describes the configuration of an admission webhook that accepts or rejects an object without changing it.", "type": "object", - "required": [ - "port" - ], "properties": { - "appProtocol": { - "description": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. This is a beta field that is guarded by the ServiceAppProtocol feature gate and enabled by default.", + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", "type": "string" }, - "name": { - "description": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.", + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "type": "string" }, - "nodePort": { - "description": "The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one.
More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport", - "type": "integer", - "format": "int32" + "metadata": { + "description": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, - "port": { - "description": "The port that will be exposed by this service.", - "type": "integer", - "format": "int32" + "webhooks": { + "description": "Webhooks is a list of webhooks and the affected resources and operations.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.ValidatingWebhook" + }, + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "admissionregistration.k8s.io", + "kind": "ValidatingWebhookConfiguration", + "version": "v1" + } + ] + }, + "io.k8s.api.admissionregistration.v1.ValidatingWebhookConfigurationList": { + "description": "ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" }, - "protocol": { - "description": "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.", + "items": { + "description": "List of ValidatingWebhookConfiguration.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.ValidatingWebhookConfiguration" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "type": "string" }, - "targetPort": { - "description": "IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.", + "metadata": { + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "admissionregistration.k8s.io", + "kind": "ValidatingWebhookConfigurationList", + "version": "v1" + } + ] + }, + "io.k8s.api.admissionregistration.v1.WebhookClientConfig": { + "description": "WebhookClientConfig contains the information to make a TLS connection with the webhook", + "type": "object", + "properties": { + "caBundle": { + "description": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.", "type": "string", - "format": "int-or-string" + "format": "byte" + }, + "service": { + "description": "`service` is a reference to the service for this webhook. 
Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.", + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1.ServiceReference" + }, + "url": { + "description": "`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", + "type": "string" } } }, - "io.k8s.api.core.v1.Toleration": { - "description": "The pod this Toleration is attached to tolerates any taint that matches the triple \u003ckey,value,effect\u003e using the matching operator \u003coperator\u003e.", + "io.k8s.api.admissionregistration.v1alpha1.AuditAnnotation": { + "description": "AuditAnnotation describes how to produce an audit annotation for an API request.", "type": "object", + "required": [ + "key", + "valueExpression" + ], "properties": { - "effect": { - "description": "Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.", - "type": "string" - }, "key": { - "description": "Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.", + "description": "key specifies the audit annotation key. The audit annotation keys of a ValidatingAdmissionPolicy must be unique. The key must be a qualified name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.\n\nThe key is combined with the resource name of the ValidatingAdmissionPolicy to construct an audit annotation key: \"{ValidatingAdmissionPolicy name}/{key}\".\n\nIf an admission webhook uses the same resource name as this ValidatingAdmissionPolicy and the same audit annotation key, the annotation key will be identical. In this case, the first annotation written with the key will be included in the audit event and all subsequent annotations with the same key will be discarded.\n\nRequired.", "type": "string" }, - "operator": { - "description": "Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.", + "valueExpression": { + "description": "valueExpression represents the expression which is evaluated by CEL to produce an audit annotation value. The expression must evaluate to either a string or null value. 
If the expression evaluates to a string, the audit annotation is included with the string value. If the expression evaluates to null or empty string the audit annotation will be omitted. The valueExpression may be no longer than 5kb in length. If the result of the valueExpression is more than 10kb in length, it will be truncated to 10kb.\n\nIf multiple ValidatingAdmissionPolicyBinding resources match an API request, then the valueExpression will be evaluated for each binding. All unique values produced by the valueExpressions will be joined together in a comma-separated list.\n\nRequired.", + "type": "string" + } + } + }, + "io.k8s.api.admissionregistration.v1alpha1.ExpressionWarning": { + "description": "ExpressionWarning is warning information that targets a specific expression.", + "type": "object", + "required": [ + "fieldRef", + "warning" + ], + "properties": { + "fieldRef": { + "description": "The path to the field that refers to the expression. For example, the reference to the expression of the first item of validations is \"spec.validations[0].expression\"", "type": "string" }, + "warning": { + "description": "The content of type checking information in a human-readable form. Each line of the warning contains the type that the expression is checked against, followed by the type check error from the compiler.", "type": "string" } } }, + "io.k8s.api.admissionregistration.v1alpha1.MatchCondition": { "type": "object", "required": [ - "name" + "name", + "expression" ], "properties": { - "awsElasticBlockStore": { - "description": "Represents a Persistent Disk resource in AWS.\n\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling.", - "type": "object", - "required": [ - "volumeID" - ], - "properties": { - "fsType": { - "description": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", - "type": "string" - }, - "partition": { - "description": "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).", - "type": "integer", - "format": "int32" - }, - "readOnly": { - "description": "Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\". If omitted, the default is \"false\".
More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", - "type": "boolean" - }, - "volumeID": { - "description": "Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", - "type": "string" - } - } + "expression": { + "description": "Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables:\n\n'object' - The object from the incoming request. The value is null for DELETE requests. 'oldObject' - The existing object. The value is null for CREATE requests. 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\nDocumentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/\n\nRequired.", + "type": "string" }, - "azureDisk": { - "description": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", - "type": "object", - "required": [ - "diskName", - "diskURI" - ], - "properties": { - "cachingMode": { - "description": "Host Caching mode: None, Read Only, Read Write.", - "type": "string" - }, - "diskName": { - "description": "The Name of the data disk in the blob storage", - "type": "string" - }, - "diskURI": { - "description": "The URI the data disk in the blob storage", - "type": "string" - }, - "fsType": { - "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", - "type": "string" - }, - "kind": { - "description": "Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared", - "type": "string" - }, - "readOnly": { - "description": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", - "type": "boolean" - } - } + "name": { + "description": "Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')\n\nRequired.", + "type": "string" + } + } + }, + "io.k8s.api.admissionregistration.v1alpha1.MatchResources": { + "description": "MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. 
The exclude rules take precedence over include rules (if a resource matches both, it is excluded)", "type": "object", "properties": { "excludeResourceRules": { "description": "ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)", "type": "array", "items": { "$ref": "#/definitions/io.k8s.api.admissionregistration.v1alpha1.NamedRuleWithOperations" }, "x-kubernetes-list-type": "atomic" }, "matchPolicy": { "description": "matchPolicy defines how the \"MatchResources\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\n\n- Equivalent: match a request if it modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\n\nDefaults to \"Equivalent\"", "type": "string" }, - "cephfs": { - "description": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.", - "type": "object", - "required": [ - "monitors" - ], - "properties": { - "monitors": { - "description": "Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", - "type": "array", - "items": { - "type": "string" - } - }, - "path": { - "description": "Optional: Used as the mounted root, rather than the full Ceph tree, default is /", - "type": "string" - }, - "readOnly": { - "description": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", - "type": "boolean" - }, - "secretFile": { - "description": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", - "type": "string" - }, - "secretRef": { - "description": "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.", - "type": "object", - "properties": { - "name": { - "description": "Name of the referent.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", - "type": "string" - } - } - }, - "user": { - "description": "Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", - "type": "string" - } - } + "namespaceSelector": { + "description": "NamespaceSelector decides whether to run the admission control policy on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the policy.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the policy on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" }, - "cinder": { - "description": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", - "type": "object", - "required": [ - "volumeID" - ], - "properties": { - "fsType": { - "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", - "type": "string" - }, - "readOnly": { - "description": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", - "type": "boolean" - }, - "secretRef": { - "description": "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.", - "type": "object", - "properties": { - "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", - "type": "string" - } - } - }, - "volumeID": { - "description": "volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", - "type": "string" - } - } + "objectSelector": { + "description": "ObjectSelector decides whether to run the validation based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the cel validation, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. 
Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + }, + "resourceRules": { + "description": "ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1alpha1.NamedRuleWithOperations" + }, + "x-kubernetes-list-type": "atomic" + } + }, + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.admissionregistration.v1alpha1.NamedRuleWithOperations": { + "description": "NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.", + "type": "object", + "properties": { + "apiGroups": { + "description": "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "atomic" + }, + "apiVersions": { + "description": "APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "atomic" + }, + "operations": { + "description": "Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * for all of those operations and any future admission operations that are added. If '*' is present, the length of the slice must be one. Required.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "atomic" + }, + "resourceNames": { + "description": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "atomic" + }, + "resources": { + "description": "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "atomic" + }, + "scope": { + "description": "scope specifies the scope of this rule. Valid values are \"Cluster\", \"Namespaced\", and \"*\" \"Cluster\" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. \"Namespaced\" means that only namespaced resources will match this rule. \"*\" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is \"*\".", + "type": "string" + } + }, + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.admissionregistration.v1alpha1.ParamKind": { + "description": "ParamKind is a tuple of Group Kind and Version.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion is the API group version the resources belong to. In format of \"group/version\". 
Required.", + "type": "string" + }, + "kind": { + "description": "Kind is the API kind the resources belong to. Required.", + "type": "string" + } + }, + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.admissionregistration.v1alpha1.ParamRef": { + "description": "ParamRef describes how to locate the params to be used as input to expressions of rules applied by a policy binding.", + "type": "object", + "properties": { + "name": { + "description": "`name` is the name of the resource being referenced.\n\n`name` and `selector` are mutually exclusive properties. If one is set, the other must be unset.", + "type": "string" + }, + "namespace": { + "description": "namespace is the namespace of the referenced resource. Allows limiting the search for params to a specific namespace. Applies to both `name` and `selector` fields.\n\nA per-namespace parameter may be used by specifying a namespace-scoped `paramKind` in the policy and leaving this field empty.\n\n- If `paramKind` is cluster-scoped, this field MUST be unset. Setting this field results in a configuration error.\n\n- If `paramKind` is namespace-scoped, the namespace of the object being evaluated for admission will be used when this field is left unset. Take care that if this is left empty the binding must not match any cluster-scoped resources, which will result in an error.", + "type": "string" + }, + "parameterNotFoundAction": { + "description": "`parameterNotFoundAction` controls the behavior of the binding when the resource exists, and name or selector is valid, but there are no parameters matched by the binding. If the value is set to `Allow`, then no matched parameters will be treated as successful validation by the binding. If set to `Deny`, then no matched parameters will be subject to the `failurePolicy` of the policy.\n\nAllowed values are `Allow` or `Deny` Default to `Deny`", + "type": "string" + }, + "selector": { + "description": "selector can be used to match multiple param objects based on their labels. Supply selector: {} to match all resources of the ParamKind.\n\nIf multiple params are found, they are all evaluated with the policy expressions and the results are ANDed together.\n\nOne of `name` or `selector` must be set, but `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + } + }, + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.admissionregistration.v1alpha1.TypeChecking": { + "description": "TypeChecking contains results of type checking the expressions in the ValidatingAdmissionPolicy", + "type": "object", + "properties": { + "expressionWarnings": { + "description": "The type checking warnings for each expression.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1alpha1.ExpressionWarning" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicy": { + "description": "ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "Specification of the desired behavior of the ValidatingAdmissionPolicy.", + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicySpec" + }, + "status": { + "description": "The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy behaves in the expected way. Populated by the system. Read-only.", + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "admissionregistration.k8s.io", + "kind": "ValidatingAdmissionPolicy", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBinding": { + "description": "ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with paramerized resources. ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters.\n\nFor a given admission request, each binding will cause its policy to be evaluated N times, where N is 1 for policies/bindings that don't use params, otherwise N is the number of parameters selected by the binding.\n\nThe CEL expressions of a policy must have a computed CEL cost below the maximum CEL budget. Each evaluation of the policy is given an independent CEL cost budget. Adding/removing policies, bindings, or params can not affect whether a given (policy, binding, param) combination is within its own CEL budget.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "Specification of the desired behavior of the ValidatingAdmissionPolicyBinding.", + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBindingSpec" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "admissionregistration.k8s.io", + "kind": "ValidatingAdmissionPolicyBinding", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBindingList": { + "description": "ValidatingAdmissionPolicyBindingList is a list of ValidatingAdmissionPolicyBinding.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of PolicyBinding.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBinding" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "admissionregistration.k8s.io", + "kind": "ValidatingAdmissionPolicyBindingList", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyBindingSpec": { + "description": "ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding.", + "type": "object", + "properties": { + "matchResources": { + "description": "MatchResources declares what resources match this binding and will be validated by it. Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this. If this is unset, all resources matched by the policy are validated by this binding When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated. Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required.", + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1alpha1.MatchResources" + }, + "paramRef": { + "description": "paramRef specifies the parameter resource used to configure the admission control policy. It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy. 
If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy applied. If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.", + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1alpha1.ParamRef" + }, + "policyName": { + "description": "PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required.", + "type": "string" + }, + "validationActions": { + "description": "validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions.\n\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\n\nvalidationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action.\n\nThe supported actions values are:\n\n\"Deny\" specifies that a validation failure results in a denied request.\n\n\"Warn\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses.\n\n\"Audit\" specifies that a validation failure is included in the published audit event for the request. The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\"validation.policy.admission.k8s.io/validation_failure\": \"[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]\"`\n\nClients should expect to handle additional values by ignoring any values not recognized.\n\n\"Deny\" and \"Warn\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\n\nRequired.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "set" + } + } + }, + "io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyList": { + "description": "ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of ValidatingAdmissionPolicy.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicy" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "admissionregistration.k8s.io", + "kind": "ValidatingAdmissionPolicyList", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicySpec": { + "description": "ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy.", + "type": "object", + "properties": { + "auditAnnotations": { + "description": "auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. validations and auditAnnotations may not both be empty; a least one of validations or auditAnnotations is required.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1alpha1.AuditAnnotation" + }, + "x-kubernetes-list-type": "atomic" + }, + "failurePolicy": { + "description": "failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\n\nA policy is invalid if spec.paramKind refers to a non-existent Kind. A binding is invalid if spec.paramRef.name refers to a non-existent resource.\n\nfailurePolicy does not define how validations that evaluate to false are handled.\n\nWhen failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions define how failures are enforced.\n\nAllowed values are Ignore or Fail. Defaults to Fail.", + "type": "string" + }, + "matchConditions": { + "description": "MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\n 3. 
If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the policy is skipped", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1alpha1.MatchCondition" + }, + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "matchConstraints": { + "description": "MatchConstraints specifies what resources this policy is designed to validate. The AdmissionPolicy cares about a request if it matches _all_ Constraints. However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding. Required.", + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1alpha1.MatchResources" + }, + "paramKind": { + "description": "ParamKind specifies the kind of resources used to parameterize this policy. If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null.", + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1alpha1.ParamKind" + }, + "validations": { + "description": "Validations contain CEL expressions which is used to apply the validation. Validations and AuditAnnotations may not both be empty; a minimum of one Validations or AuditAnnotations is required.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1alpha1.Validation" + }, + "x-kubernetes-list-type": "atomic" + }, + "variables": { + "description": "Variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except MatchConditions because MatchConditions are evaluated before the rest of the policy.\n\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, Variables must be sorted by the order of first appearance and acyclic.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1alpha1.Variable" + }, + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + } + } + }, + "io.k8s.api.admissionregistration.v1alpha1.ValidatingAdmissionPolicyStatus": { + "description": "ValidatingAdmissionPolicyStatus represents the status of a ValidatingAdmissionPolicy.", + "type": "object", + "properties": { + "conditions": { + "description": "The conditions represent the latest available observations of a policy's current state.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition" + }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map" + }, + "observedGeneration": { + "description": "The generation observed by the controller.", + "type": "integer", + "format": "int64" + }, + "typeChecking": { + "description": "The results of type checking for each expression. 
Presence of this field indicates the completion of the type checking.", + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1alpha1.TypeChecking" + } + } + }, + "io.k8s.api.admissionregistration.v1alpha1.Validation": { + "description": "Validation specifies the CEL expression which is used to apply the validation.", + "type": "object", + "required": [ + "expression" + ], + "properties": { + "expression": { + "description": "Expression represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\n For example, a variable named 'foo' can be accessed as 'variables.foo'.\n- 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\n\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible.\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:\n\t \"true\", \"false\", \"null\", \"in\", \"as\", \"break\", \"const\", \"continue\", \"else\", \"for\", \"function\", \"if\",\n\t \"import\", \"let\", \"loop\", \"package\", \"namespace\", \"return\".\nExamples:\n - Expression accessing a property named \"namespace\": {\"Expression\": \"object.__namespace__ \u003e 0\"}\n - Expression accessing a property named \"x-prop\": {\"Expression\": \"object.x__dash__prop \u003e 0\"}\n - Expression accessing a property named \"redact__d\": {\"Expression\": \"object.redact__underscores__d \u003e 0\"}\n\nEquality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:\n - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and\n non-intersecting elements in `Y` are appended, retaining their partial order.\n - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values\n are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. 
Elements in `Y` with\n non-intersecting keys are appended, retaining their partial order.\nRequired.", + "type": "string" + }, + "message": { + "description": "Message represents the message displayed when validation fails. The message is required if the Expression contains line breaks. The message must not contain line breaks. If unset, the message is \"failed rule: {Rule}\". e.g. \"must be a URL with the host matching spec.host\" If the Expression contains line breaks. Message is required. The message must not contain line breaks. If unset, the message is \"failed Expression: {Expression}\".", + "type": "string" + }, + "messageExpression": { + "description": "messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. Example: \"object.x must be less than max (\"+string(params.max)+\")\"", + "type": "string" + }, + "reason": { + "description": "Reason represents a machine-readable description of why this validation failed. If this is the first validation in the list to fail, this reason, as well as the corresponding HTTP response code, are used in the HTTP response to the client. The currently supported reasons are: \"Unauthorized\", \"Forbidden\", \"Invalid\", \"RequestEntityTooLarge\". If not set, StatusReasonInvalid is used in the response to the client.", + "type": "string" + } + } + }, + "io.k8s.api.admissionregistration.v1alpha1.Variable": { + "description": "Variable is the definition of a variable that is used for composition.", + "type": "object", + "required": [ + "name", + "expression" + ], + "properties": { + "expression": { + "description": "Expression is the expression that will be evaluated as the value of the variable. The CEL expression has access to the same identifiers as the CEL expressions in Validation.", + "type": "string" + }, + "name": { + "description": "Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. The variable can be accessed in other expressions through `variables` For example, if name is \"foo\", the variable will be available as `variables.foo`", + "type": "string" + } + } + }, + "io.k8s.api.admissionregistration.v1beta1.AuditAnnotation": { + "description": "AuditAnnotation describes how to produce an audit annotation for an API request.", + "type": "object", + "required": [ + "key", + "valueExpression" + ], + "properties": { + "key": { + "description": "key specifies the audit annotation key. The audit annotation keys of a ValidatingAdmissionPolicy must be unique. 
The key must be a qualified name ([A-Za-z0-9][-A-Za-z0-9_.]*) no more than 63 bytes in length.\n\nThe key is combined with the resource name of the ValidatingAdmissionPolicy to construct an audit annotation key: \"{ValidatingAdmissionPolicy name}/{key}\".\n\nIf an admission webhook uses the same resource name as this ValidatingAdmissionPolicy and the same audit annotation key, the annotation key will be identical. In this case, the first annotation written with the key will be included in the audit event and all subsequent annotations with the same key will be discarded.\n\nRequired.", + "type": "string" + }, + "valueExpression": { + "description": "valueExpression represents the expression which is evaluated by CEL to produce an audit annotation value. The expression must evaluate to either a string or null value. If the expression evaluates to a string, the audit annotation is included with the string value. If the expression evaluates to null or empty string the audit annotation will be omitted. The valueExpression may be no longer than 5kb in length. If the result of the valueExpression is more than 10kb in length, it will be truncated to 10kb.\n\nIf multiple ValidatingAdmissionPolicyBinding resources match an API request, then the valueExpression will be evaluated for each binding. All unique values produced by the valueExpressions will be joined together in a comma-separated list.\n\nRequired.", + "type": "string" + } + } + }, + "io.k8s.api.admissionregistration.v1beta1.ExpressionWarning": { + "description": "ExpressionWarning is a warning information that targets a specific expression.", + "type": "object", + "required": [ + "fieldRef", + "warning" + ], + "properties": { + "fieldRef": { + "description": "The path to the field that refers the expression. For example, the reference to the expression of the first item of validations is \"spec.validations[0].expression\"", + "type": "string" + }, + "warning": { + "description": "The content of type checking information in a human-readable form. Each line of the warning contains the type that the expression is checked against, followed by the type check error from the compiler.", + "type": "string" + } + } + }, + "io.k8s.api.admissionregistration.v1beta1.MatchCondition": { + "description": "MatchCondition represents a condition which must be fulfilled for a request to be sent to a webhook.", + "type": "object", + "required": [ + "name", + "expression" + ], + "properties": { + "expression": { + "description": "Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables:\n\n'object' - The object from the incoming request. The value is null for DELETE requests. 'oldObject' - The existing object. The value is null for CREATE requests. 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). 'authorizer' - A CEL Authorizer. 
May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\nDocumentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/\n\nRequired.", + "type": "string" + }, + "name": { + "description": "Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')\n\nRequired.", + "type": "string" + } + } + }, + "io.k8s.api.admissionregistration.v1beta1.MatchResources": { + "description": "MatchResources decides whether to run the admission control policy on an object based on whether it meets the match criteria. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)", + "type": "object", + "properties": { + "excludeResourceRules": { + "description": "ExcludeResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy should not care about. The exclude rules take precedence over include rules (if a resource matches both, it is excluded)", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1beta1.NamedRuleWithOperations" + }, + "x-kubernetes-list-type": "atomic" + }, + "matchPolicy": { + "description": "matchPolicy defines how the \"MatchResources\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the ValidatingAdmissionPolicy.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the ValidatingAdmissionPolicy.\n\nDefaults to \"Equivalent\"", + "type": "string" + }, + "namespaceSelector": { + "description": "NamespaceSelector decides whether to run the admission control policy on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. 
If the object is another cluster scoped resource, it never skips the policy.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the policy on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + }, + "objectSelector": { + "description": "ObjectSelector decides whether to run the validation based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the cel validation, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + }, + "resourceRules": { + "description": "ResourceRules describes what operations on what resources/subresources the ValidatingAdmissionPolicy matches. The policy cares about an operation if it matches _any_ Rule.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1beta1.NamedRuleWithOperations" + }, + "x-kubernetes-list-type": "atomic" + } + }, + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.admissionregistration.v1beta1.NamedRuleWithOperations": { + "description": "NamedRuleWithOperations is a tuple of Operations and Resources with ResourceNames.", + "type": "object", + "properties": { + "apiGroups": { + "description": "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "atomic" + }, + "apiVersions": { + "description": "APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "atomic" + }, + "operations": { + "description": "Operations is the operations the admission hook cares about - CREATE, UPDATE, DELETE, CONNECT or * for all of those operations and any future admission operations that are added. If '*' is present, the length of the slice must be one. Required.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "atomic" + }, + "resourceNames": { + "description": "ResourceNames is an optional white list of names that the rule applies to. 
An empty set means that everything is allowed.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "atomic" + }, + "resources": { + "description": "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "atomic" + }, + "scope": { + "description": "scope specifies the scope of this rule. Valid values are \"Cluster\", \"Namespaced\", and \"*\" \"Cluster\" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. \"Namespaced\" means that only namespaced resources will match this rule. \"*\" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is \"*\".", + "type": "string" + } + }, + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.admissionregistration.v1beta1.ParamKind": { + "description": "ParamKind is a tuple of Group Kind and Version.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion is the API group version the resources belong to. In format of \"group/version\". Required.", + "type": "string" + }, + "kind": { + "description": "Kind is the API kind the resources belong to. Required.", + "type": "string" + } + }, + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.admissionregistration.v1beta1.ParamRef": { + "description": "ParamRef describes how to locate the params to be used as input to expressions of rules applied by a policy binding.", + "type": "object", + "properties": { + "name": { + "description": "name is the name of the resource being referenced.\n\nOne of `name` or `selector` must be set, but `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset.\n\nA single parameter used for all admission requests can be configured by setting the `name` field, leaving `selector` blank, and setting namespace if `paramKind` is namespace-scoped.", + "type": "string" + }, + "namespace": { + "description": "namespace is the namespace of the referenced resource. Allows limiting the search for params to a specific namespace. Applies to both `name` and `selector` fields.\n\nA per-namespace parameter may be used by specifying a namespace-scoped `paramKind` in the policy and leaving this field empty.\n\n- If `paramKind` is cluster-scoped, this field MUST be unset. Setting this field results in a configuration error.\n\n- If `paramKind` is namespace-scoped, the namespace of the object being evaluated for admission will be used when this field is left unset. Take care that if this is left empty the binding must not match any cluster-scoped resources, which will result in an error.", + "type": "string" + }, + "parameterNotFoundAction": { + "description": "`parameterNotFoundAction` controls the behavior of the binding when the resource exists, and name or selector is valid, but there are no parameters matched by the binding. If the value is set to `Allow`, then no matched parameters will be treated as successful validation by the binding. 
If set to `Deny`, then no matched parameters will be subject to the `failurePolicy` of the policy.\n\nAllowed values are `Allow` or `Deny`\n\nRequired", + "type": "string" + }, + "selector": { + "description": "selector can be used to match multiple param objects based on their labels. Supply selector: {} to match all resources of the ParamKind.\n\nIf multiple params are found, they are all evaluated with the policy expressions and the results are ANDed together.\n\nOne of `name` or `selector` must be set, but `name` and `selector` are mutually exclusive properties. If one is set, the other must be unset.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + } + }, + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.admissionregistration.v1beta1.TypeChecking": { + "description": "TypeChecking contains results of type checking the expressions in the ValidatingAdmissionPolicy", + "type": "object", + "properties": { + "expressionWarnings": { + "description": "The type checking warnings for each expression.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1beta1.ExpressionWarning" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicy": { + "description": "ValidatingAdmissionPolicy describes the definition of an admission validation policy that accepts or rejects an object without changing it.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "Specification of the desired behavior of the ValidatingAdmissionPolicy.", + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicySpec" + }, + "status": { + "description": "The status of the ValidatingAdmissionPolicy, including warnings that are useful to determine if the policy behaves in the expected way. Populated by the system. Read-only.", + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "admissionregistration.k8s.io", + "kind": "ValidatingAdmissionPolicy", + "version": "v1beta1" + } + ] + }, + "io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBinding": { + "description": "ValidatingAdmissionPolicyBinding binds the ValidatingAdmissionPolicy with paramerized resources. 
ValidatingAdmissionPolicyBinding and parameter CRDs together define how cluster administrators configure policies for clusters.\n\nFor a given admission request, each binding will cause its policy to be evaluated N times, where N is 1 for policies/bindings that don't use params, otherwise N is the number of parameters selected by the binding.\n\nThe CEL expressions of a policy must have a computed CEL cost below the maximum CEL budget. Each evaluation of the policy is given an independent CEL cost budget. Adding/removing policies, bindings, or params can not affect whether a given (policy, binding, param) combination is within its own CEL budget.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "Specification of the desired behavior of the ValidatingAdmissionPolicyBinding.", + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBindingSpec" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "admissionregistration.k8s.io", + "kind": "ValidatingAdmissionPolicyBinding", + "version": "v1beta1" + } + ] + }, + "io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBindingList": { + "description": "ValidatingAdmissionPolicyBindingList is a list of ValidatingAdmissionPolicyBinding.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of PolicyBinding.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBinding" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "admissionregistration.k8s.io", + "kind": "ValidatingAdmissionPolicyBindingList", + "version": "v1beta1" + } + ] + }, + "io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyBindingSpec": { + "description": "ValidatingAdmissionPolicyBindingSpec is the specification of the ValidatingAdmissionPolicyBinding.", + "type": "object", + "properties": { + "matchResources": { + "description": "MatchResources declares what resources match this binding and will be validated by it. Note that this is intersected with the policy's matchConstraints, so only requests that are matched by the policy can be selected by this. If this is unset, all resources matched by the policy are validated by this binding When resourceRules is unset, it does not constrain resource matching. If a resource is matched by the other fields of this object, it will be validated. Note that this is differs from ValidatingAdmissionPolicy matchConstraints, where resourceRules are required.", + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1beta1.MatchResources" + }, + "paramRef": { + "description": "paramRef specifies the parameter resource used to configure the admission control policy. It should point to a resource of the type specified in ParamKind of the bound ValidatingAdmissionPolicy. If the policy specifies a ParamKind and the resource referred to by ParamRef does not exist, this binding is considered mis-configured and the FailurePolicy of the ValidatingAdmissionPolicy applied. If the policy does not specify a ParamKind then this field is ignored, and the rules are evaluated without a param.", + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1beta1.ParamRef" + }, + "policyName": { + "description": "PolicyName references a ValidatingAdmissionPolicy name which the ValidatingAdmissionPolicyBinding binds to. If the referenced resource does not exist, this binding is considered invalid and will be ignored Required.", + "type": "string" + }, + "validationActions": { + "description": "validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions.\n\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\n\nvalidationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action.\n\nThe supported actions values are:\n\n\"Deny\" specifies that a validation failure results in a denied request.\n\n\"Warn\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses.\n\n\"Audit\" specifies that a validation failure is included in the published audit event for the request. 
The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\"validation.policy.admission.k8s.io/validation_failure\": \"[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]\"`\n\nClients should expect to handle additional values by ignoring any values not recognized.\n\n\"Deny\" and \"Warn\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\n\nRequired.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "set" + } + } + }, + "io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyList": { + "description": "ValidatingAdmissionPolicyList is a list of ValidatingAdmissionPolicy.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of ValidatingAdmissionPolicy.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicy" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "admissionregistration.k8s.io", + "kind": "ValidatingAdmissionPolicyList", + "version": "v1beta1" + } + ] + }, + "io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicySpec": { + "description": "ValidatingAdmissionPolicySpec is the specification of the desired behavior of the AdmissionPolicy.", + "type": "object", + "properties": { + "auditAnnotations": { + "description": "auditAnnotations contains CEL expressions which are used to produce audit annotations for the audit event of the API request. 
validations and auditAnnotations may not both be empty; at least one of validations or auditAnnotations is required.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1beta1.AuditAnnotation" + }, + "x-kubernetes-list-type": "atomic" + }, + "failurePolicy": { + "description": "failurePolicy defines how to handle failures for the admission policy. Failures can occur from CEL expression parse errors, type check errors, runtime errors and invalid or mis-configured policy definitions or bindings.\n\nA policy is invalid if spec.paramKind refers to a non-existent Kind. A binding is invalid if spec.paramRef.name refers to a non-existent resource.\n\nfailurePolicy does not define how validations that evaluate to false are handled.\n\nWhen failurePolicy is set to Fail, ValidatingAdmissionPolicyBinding validationActions define how failures are enforced.\n\nAllowed values are Ignore or Fail. Defaults to Fail.", + "type": "string" + }, + "matchConditions": { + "description": "MatchConditions is a list of conditions that must be met for a request to be validated. Match conditions filter requests that have already been matched by the rules, namespaceSelector, and objectSelector. An empty list of matchConditions matches all requests. There are a maximum of 64 match conditions allowed.\n\nIf a parameter object is provided, it can be accessed via the `params` handle in the same manner as validation expressions.\n\nThe exact matching logic is (in order):\n 1. If ANY matchCondition evaluates to FALSE, the policy is skipped.\n 2. If ALL matchConditions evaluate to TRUE, the policy is evaluated.\n 3. If any matchCondition evaluates to an error (but none are FALSE):\n - If failurePolicy=Fail, reject the request\n - If failurePolicy=Ignore, the policy is skipped", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1beta1.MatchCondition" + }, + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "matchConstraints": { + "description": "MatchConstraints specifies what resources this policy is designed to validate. The AdmissionPolicy cares about a request if it matches _all_ Constraints. However, in order to prevent clusters from being put into an unstable state that cannot be recovered from via the API, ValidatingAdmissionPolicy cannot match ValidatingAdmissionPolicy and ValidatingAdmissionPolicyBinding. Required.", + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1beta1.MatchResources" + }, + "paramKind": { + "description": "ParamKind specifies the kind of resources used to parameterize this policy. If absent, there are no parameters for this policy and the param CEL variable will not be provided to validation expressions. If ParamKind refers to a non-existent kind, this policy definition is mis-configured and the FailurePolicy is applied. If paramKind is specified but paramRef is unset in ValidatingAdmissionPolicyBinding, the params variable will be null.", + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1beta1.ParamKind" + }, + "validations": { + "description": "Validations contain CEL expressions which are used to apply the validation.
Validations and AuditAnnotations may not both be empty; at least one of Validations or AuditAnnotations is required.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1beta1.Validation" + }, + "x-kubernetes-list-type": "atomic" + }, + "variables": { + "description": "Variables contain definitions of variables that can be used in composition of other expressions. Each variable is defined as a named CEL expression. The variables defined here will be available under `variables` in other expressions of the policy except MatchConditions because MatchConditions are evaluated before the rest of the policy.\n\nThe expression of a variable can refer to other variables defined earlier in the list but not those after. Thus, Variables must be sorted by the order of first appearance and acyclic.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1beta1.Variable" + }, + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + } + } + }, + "io.k8s.api.admissionregistration.v1beta1.ValidatingAdmissionPolicyStatus": { + "description": "ValidatingAdmissionPolicyStatus represents the status of an admission validation policy.", + "type": "object", + "properties": { + "conditions": { + "description": "The conditions represent the latest available observations of a policy's current state.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition" + }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map" + }, + "observedGeneration": { + "description": "The generation observed by the controller.", + "type": "integer", + "format": "int64" + }, + "typeChecking": { + "description": "The results of type checking for each expression. Presence of this field indicates the completion of the type checking.", + "$ref": "#/definitions/io.k8s.api.admissionregistration.v1beta1.TypeChecking" + } + } + }, + "io.k8s.api.admissionregistration.v1beta1.Validation": { + "description": "Validation specifies the CEL expression which is used to apply the validation.", + "type": "object", + "required": [ + "expression" + ], + "properties": { + "expression": { + "description": "Expression represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec CEL expressions have access to the contents of the API request/response, organized into CEL variables as well as some other useful variables:\n\n- 'object' - The object from the incoming request. The value is null for DELETE requests. - 'oldObject' - The existing object. The value is null for CREATE requests. - 'request' - Attributes of the API request([ref](/pkg/apis/admission/types.go#AdmissionRequest)). - 'params' - Parameter resource referred to by the policy binding being evaluated. Only populated if the policy has a ParamKind. - 'namespaceObject' - The namespace object that the incoming object belongs to. The value is null for cluster-scoped resources. - 'variables' - Map of composited variables, from its name to its lazily evaluated value.\n For example, a variable named 'foo' can be accessed as 'variables.foo'.\n- 'authorizer' - A CEL Authorizer.
May be used to perform authorization checks for the principal (user or service account) of the request.\n See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz\n- 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the\n request resource.\n\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object. No other metadata properties are accessible.\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:\n\t \"true\", \"false\", \"null\", \"in\", \"as\", \"break\", \"const\", \"continue\", \"else\", \"for\", \"function\", \"if\",\n\t \"import\", \"let\", \"loop\", \"package\", \"namespace\", \"return\".\nExamples:\n - Expression accessing a property named \"namespace\": {\"Expression\": \"object.__namespace__ \u003e 0\"}\n - Expression accessing a property named \"x-prop\": {\"Expression\": \"object.x__dash__prop \u003e 0\"}\n - Expression accessing a property named \"redact__d\": {\"Expression\": \"object.redact__underscores__d \u003e 0\"}\n\nEquality on arrays with list type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type use the semantics of the list type:\n - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and\n non-intersecting elements in `Y` are appended, retaining their partial order.\n - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values\n are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with\n non-intersecting keys are appended, retaining their partial order.\nRequired.", + "type": "string" + }, + "message": { + "description": "Message represents the message displayed when validation fails. The message is required if the Expression contains line breaks, and must not itself contain line breaks. If unset, the message is \"failed Expression: {Expression}\". e.g. \"must be a URL with the host matching spec.host\"", + "type": "string" + }, + "messageExpression": { + "description": "messageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a validation, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged.
messageExpression has access to all the same variables as the `expression` except for 'authorizer' and 'authorizer.requestResource'. Example: \"object.x must be less than max (\"+string(params.max)+\")\"", + "type": "string" + }, + "reason": { + "description": "Reason represents a machine-readable description of why this validation failed. If this is the first validation in the list to fail, this reason, as well as the corresponding HTTP response code, are used in the HTTP response to the client. The currently supported reasons are: \"Unauthorized\", \"Forbidden\", \"Invalid\", \"RequestEntityTooLarge\". If not set, StatusReasonInvalid is used in the response to the client.", + "type": "string" + } + } + }, + "io.k8s.api.admissionregistration.v1beta1.Variable": { + "description": "Variable is the definition of a variable that is used for composition. A variable is defined as a named expression.", + "type": "object", + "required": [ + "name", + "expression" + ], + "properties": { + "expression": { + "description": "Expression is the expression that will be evaluated as the value of the variable. The CEL expression has access to the same identifiers as the CEL expressions in Validation.", + "type": "string" + }, + "name": { + "description": "Name is the name of the variable. The name must be a valid CEL identifier and unique among all variables. The variable can be accessed in other expressions through `variables`. For example, if name is \"foo\", the variable will be available as `variables.foo`.", + "type": "string" + } + }, + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.apiserverinternal.v1alpha1.ServerStorageVersion": { + "description": "An API server instance reports the version it can decode and the version it encodes objects to when persisting objects in the backend.", + "type": "object", + "properties": { + "apiServerID": { + "description": "The ID of the reporting API server.", + "type": "string" + }, + "decodableVersions": { + "description": "The API server can decode objects encoded in these versions. The encodingVersion must be included in the decodableVersions.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "set" + }, + "encodingVersion": { + "description": "The API server encodes the object to this version when persisting it in the backend (e.g., etcd).", + "type": "string" + }, + "servedVersions": { + "description": "The API server can serve these versions. DecodableVersions must include all ServedVersions.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "set" + } + } + }, + "io.k8s.api.apiserverinternal.v1alpha1.StorageVersion": { + "description": "Storage version of a specific resource.", + "type": "object", + "required": [ + "spec", + "status" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "The name is \u003cgroup\u003e.\u003cresource\u003e.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "Spec is an empty spec. It is here to comply with Kubernetes API style.", + "$ref": "#/definitions/io.k8s.api.apiserverinternal.v1alpha1.StorageVersionSpec" + }, + "status": { + "description": "API server instances report the version they can decode and the version they encode objects to when persisting objects in the backend.", + "$ref": "#/definitions/io.k8s.api.apiserverinternal.v1alpha1.StorageVersionStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "internal.apiserver.k8s.io", + "kind": "StorageVersion", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.apiserverinternal.v1alpha1.StorageVersionCondition": { + "description": "Describes the state of the storageVersion at a certain point.", + "type": "object", + "required": [ + "type", + "status", + "reason" + ], + "properties": { + "lastTransitionTime": { + "description": "Last time the condition transitioned from one status to another.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "message": { + "description": "A human readable message indicating details about the transition.", + "type": "string" + }, + "observedGeneration": { + "description": "If set, this represents the .metadata.generation that the condition was set based upon.", + "type": "integer", + "format": "int64" + }, + "reason": { + "description": "The reason for the condition's last transition.", + "type": "string" + }, + "status": { + "description": "Status of the condition, one of True, False, Unknown.", + "type": "string" + }, + "type": { + "description": "Type of the condition.", + "type": "string" + } + } + }, + "io.k8s.api.apiserverinternal.v1alpha1.StorageVersionList": { + "description": "A list of StorageVersions.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items holds a list of StorageVersion", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.apiserverinternal.v1alpha1.StorageVersion" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "internal.apiserver.k8s.io", + "kind": "StorageVersionList", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.apiserverinternal.v1alpha1.StorageVersionSpec": { + "description": "StorageVersionSpec is an empty spec.", + "type": "object" + }, + "io.k8s.api.apiserverinternal.v1alpha1.StorageVersionStatus": { + "description": "API server instances report the versions they can decode and the version they encode objects to when persisting objects in the backend.", + "type": "object", + "properties": { + "commonEncodingVersion": { + "description": "If all API server instances agree on the same encoding storage version, then this field is set to that version. Otherwise this field is left empty. API servers should finish updating their storageVersionStatus entries before serving write operations, so that this field stays in sync with reality.", + "type": "string" + }, + "conditions": { + "description": "The latest available observations of the storageVersion's state.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.apiserverinternal.v1alpha1.StorageVersionCondition" + }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map" + }, + "storageVersions": { + "description": "The reported versions per API server instance.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.apiserverinternal.v1alpha1.ServerStorageVersion" + }, + "x-kubernetes-list-map-keys": [ + "apiServerID" + ], + "x-kubernetes-list-type": "map" + } + } + }, + "io.k8s.api.apps.v1.ControllerRevision": { + "description": "ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it cannot be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", + "type": "object", + "required": [ + "revision" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "data": { + "description": "Data is the serialized representation of the state.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.runtime.RawExtension" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "revision": { + "description": "Revision indicates the revision of the state represented by Data.", + "type": "integer", + "format": "int64" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "apps", + "kind": "ControllerRevision", + "version": "v1" + } + ] + }, + "io.k8s.api.apps.v1.ControllerRevisionList": { + "description": "ControllerRevisionList is a resource containing a list of ControllerRevision objects.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is the list of ControllerRevisions", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.apps.v1.ControllerRevision" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "apps", + "kind": "ControllerRevisionList", + "version": "v1" + } + ] + }, + "io.k8s.api.apps.v1.DaemonSet": { + "description": "DaemonSet represents the configuration of a daemon set.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.apps.v1.DaemonSetSpec" + }, + "status": { + "description": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.apps.v1.DaemonSetStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "apps", + "kind": "DaemonSet", + "version": "v1" + } + ] + }, + "io.k8s.api.apps.v1.DaemonSetCondition": { + "description": "DaemonSetCondition describes the state of a DaemonSet at a certain point.", + "type": "object", + "required": [ + "type", + "status" + ], + "properties": { + "lastTransitionTime": { + "description": "Last time the condition transitioned from one status to another.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "message": { + "description": "A human readable message indicating details about the transition.", + "type": "string" + }, + "reason": { + "description": "The reason for the condition's last transition.", + "type": "string" + }, + "status": { + "description": "Status of the condition, one of True, False, Unknown.", + "type": "string" + }, + "type": { + "description": "Type of DaemonSet condition.", + "type": "string" + } + } + }, + "io.k8s.api.apps.v1.DaemonSetList": { + "description": "DaemonSetList is a collection of daemon sets.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "A list of daemon sets.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.apps.v1.DaemonSet" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "apps", + "kind": "DaemonSetList", + "version": "v1" + } + ] + }, + "io.k8s.api.apps.v1.DaemonSetSpec": { + "description": "DaemonSetSpec is the specification of a daemon set.", + "type": "object", + "required": [ + "selector", + "template" + ], + "properties": { + "minReadySeconds": { + "description": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).", + "type": "integer", + "format": "int32" + }, + "revisionHistoryLimit": { + "description": "The number of old history entries to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", + "type": "integer", + "format": "int32" + }, + "selector": { + "description": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. It must match the pod template's labels.
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + }, + "template": { + "description": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). The only allowed template.spec.restartPolicy value is \"Always\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", + "$ref": "#/definitions/io.k8s.api.core.v1.PodTemplateSpec" + }, + "updateStrategy": { + "description": "An update strategy to replace existing DaemonSet pods with new pods.", + "$ref": "#/definitions/io.k8s.api.apps.v1.DaemonSetUpdateStrategy" + } + } + }, + "io.k8s.api.apps.v1.DaemonSetStatus": { + "description": "DaemonSetStatus represents the current status of a daemon set.", + "type": "object", + "required": [ + "currentNumberScheduled", + "numberMisscheduled", + "desiredNumberScheduled", + "numberReady" + ], + "properties": { + "collisionCount": { + "description": "Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + "type": "integer", + "format": "int32" + }, + "conditions": { + "description": "Represents the latest available observations of a DaemonSet's current state.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.apps.v1.DaemonSetCondition" + }, + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "currentNumberScheduled": { + "description": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", + "type": "integer", + "format": "int32" + }, + "desiredNumberScheduled": { + "description": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", + "type": "integer", + "format": "int32" + }, + "numberAvailable": { + "description": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)", + "type": "integer", + "format": "int32" + }, + "numberMisscheduled": { + "description": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", + "type": "integer", + "format": "int32" + }, + "numberReady": { + "description": "numberReady is the number of nodes that should be running the daemon pod and have one or more of the daemon pod running with a Ready Condition.", + "type": "integer", + "format": "int32" + }, + "numberUnavailable": { + "description": "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)", + "type": "integer", + "format": "int32" + }, + "observedGeneration": { + "description": "The most recent generation observed by the daemon set controller.", + "type": "integer", + "format": "int64" + }, + "updatedNumberScheduled": { + "description": "The total number of nodes that are running an updated daemon pod", + "type": "integer", + "format": "int32" + } + } + }, + "io.k8s.api.apps.v1.DaemonSetUpdateStrategy": { + "description": "DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet.", + "type": "object", + "properties": { + "rollingUpdate": { + "description": "Rolling update config params. Present only if type = \"RollingUpdate\".", + "$ref": "#/definitions/io.k8s.api.apps.v1.RollingUpdateDaemonSet" + }, + "type": { + "description": "Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". Default is RollingUpdate.", + "type": "string" + } + } + }, + "io.k8s.api.apps.v1.Deployment": { + "description": "Deployment enables declarative updates for Pods and ReplicaSets.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "Specification of the desired behavior of the Deployment.", + "$ref": "#/definitions/io.k8s.api.apps.v1.DeploymentSpec" + }, + "status": { + "description": "Most recently observed status of the Deployment.", + "$ref": "#/definitions/io.k8s.api.apps.v1.DeploymentStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "apps", + "kind": "Deployment", + "version": "v1" + } + ] + }, + "io.k8s.api.apps.v1.DeploymentCondition": { + "description": "DeploymentCondition describes the state of a deployment at a certain point.", + "type": "object", + "required": [ + "type", + "status" + ], + "properties": { + "lastTransitionTime": { + "description": "Last time the condition transitioned from one status to another.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "lastUpdateTime": { + "description": "The last time this condition was updated.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "message": { + "description": "A human readable message indicating details about the transition.", + "type": "string" + }, + "reason": { + "description": "The reason for the condition's last transition.", + "type": "string" + }, + "status": { + "description": "Status of the condition, one of True, False, Unknown.", + "type": "string" + }, + "type": { + "description": "Type of deployment condition.", + "type": "string" + } + } + }, + "io.k8s.api.apps.v1.DeploymentList": { + "description": "DeploymentList is a list of Deployments.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is the list of Deployments.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.apps.v1.Deployment" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "apps", + "kind": "DeploymentList", + "version": "v1" + } + ] + }, + "io.k8s.api.apps.v1.DeploymentSpec": { + "description": "DeploymentSpec is the specification of the desired behavior of the Deployment.", + "type": "object", + "required": [ + "selector", + "template" + ], + "properties": { + "minReadySeconds": { + "description": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. 
Defaults to 0 (pod will be considered available as soon as it is ready)", + "type": "integer", + "format": "int32" + }, + "paused": { + "description": "Indicates that the deployment is paused.", + "type": "boolean" + }, + "progressDeadlineSeconds": { + "description": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.", + "type": "integer", + "format": "int32" + }, + "replicas": { + "description": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", + "type": "integer", + "format": "int32" + }, + "revisionHistoryLimit": { + "description": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", + "type": "integer", + "format": "int32" + }, + "selector": { + "description": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment. It must match the pod template's labels.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + }, + "strategy": { + "description": "The deployment strategy to use to replace existing pods with new ones.", + "x-kubernetes-patch-strategy": "retainKeys", + "$ref": "#/definitions/io.k8s.api.apps.v1.DeploymentStrategy" + }, + "template": { + "description": "Template describes the pods that will be created. The only allowed template.spec.restartPolicy value is \"Always\".", + "$ref": "#/definitions/io.k8s.api.core.v1.PodTemplateSpec" + } + } + }, + "io.k8s.api.apps.v1.DeploymentStatus": { + "description": "DeploymentStatus is the most recently observed status of the Deployment.", + "type": "object", + "properties": { + "availableReplicas": { + "description": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", + "type": "integer", + "format": "int32" + }, + "collisionCount": { + "description": "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.", + "type": "integer", + "format": "int32" + }, + "conditions": { + "description": "Represents the latest available observations of a deployment's current state.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.apps.v1.DeploymentCondition" + }, + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "observedGeneration": { + "description": "The generation observed by the deployment controller.", + "type": "integer", + "format": "int64" + }, + "readyReplicas": { + "description": "readyReplicas is the number of pods targeted by this Deployment with a Ready Condition.", + "type": "integer", + "format": "int32" + }, + "replicas": { + "description": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", + "type": "integer", + "format": "int32" + }, + "unavailableReplicas": { + "description": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. 
They may either be pods that are running but not yet available or pods that still have not been created.", + "type": "integer", + "format": "int32" + }, + "updatedReplicas": { + "description": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", + "type": "integer", + "format": "int32" + } + } + }, + "io.k8s.api.apps.v1.DeploymentStrategy": { + "description": "DeploymentStrategy describes how to replace existing pods with new ones.", + "type": "object", + "properties": { + "rollingUpdate": { + "description": "Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.", + "$ref": "#/definitions/io.k8s.api.apps.v1.RollingUpdateDeployment" + }, + "type": { + "description": "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.", + "type": "string" + } + } + }, + "io.k8s.api.apps.v1.ReplicaSet": { + "description": "ReplicaSet ensures that a specified number of pod replicas are running at any given time.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.apps.v1.ReplicaSetSpec" + }, + "status": { + "description": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.apps.v1.ReplicaSetStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "apps", + "kind": "ReplicaSet", + "version": "v1" + } + ] + }, + "io.k8s.api.apps.v1.ReplicaSetCondition": { + "description": "ReplicaSetCondition describes the state of a replica set at a certain point.", + "type": "object", + "required": [ + "type", + "status" + ], + "properties": { + "lastTransitionTime": { + "description": "The last time the condition transitioned from one status to another.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "message": { + "description": "A human readable message indicating details about the transition.", + "type": "string" + }, + "reason": { + "description": "The reason for the condition's last transition.", + "type": "string" + }, + "status": { + "description": "Status of the condition, one of True, False, Unknown.", + "type": "string" + }, + "type": { + "description": "Type of replica set condition.", + "type": "string" + } + } + }, + "io.k8s.api.apps.v1.ReplicaSetList": { + "description": "ReplicaSetList is a collection of ReplicaSets.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.apps.v1.ReplicaSet" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "apps", + "kind": "ReplicaSetList", + "version": "v1" + } + ] + }, + "io.k8s.api.apps.v1.ReplicaSetSpec": { + "description": "ReplicaSetSpec is the specification of a ReplicaSet.", + "type": "object", + "required": [ + "selector" + ], + "properties": { + "minReadySeconds": { + "description": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "type": "integer", + "format": "int32" + }, + "replicas": { + "description": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", + "type": "integer", + "format": "int32" + }, + "selector": { + "description": "Selector is a label query over pods that should match the replica count. Label keys and values that must match in order to be controlled by this replica set. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + }, + "template": { + "description": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", + "$ref": "#/definitions/io.k8s.api.core.v1.PodTemplateSpec" + } + } + }, + "io.k8s.api.apps.v1.ReplicaSetStatus": { + "description": "ReplicaSetStatus represents the current status of a ReplicaSet.", + "type": "object", + "required": [ + "replicas" + ], + "properties": { + "availableReplicas": { + "description": "The number of available replicas (ready for at least minReadySeconds) for this replica set.", + "type": "integer", + "format": "int32" + }, + "conditions": { + "description": "Represents the latest available observations of a replica set's current state.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.apps.v1.ReplicaSetCondition" + }, + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "fullyLabeledReplicas": { + "description": "The number of pods that have labels matching the labels of the pod template of the replicaset.", + "type": "integer", + "format": "int32" + }, + "observedGeneration": { + "description": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.", + "type": "integer", + "format": "int64" + }, + "readyReplicas": { + "description": "readyReplicas is the number of pods targeted by this ReplicaSet with a Ready Condition.", + "type": "integer", + "format": "int32" + }, + "replicas": { + "description": "Replicas is the most recently observed number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", + "type": "integer", + "format": "int32" + } + } + }, + "io.k8s.api.apps.v1.RollingUpdateDaemonSet": { + "description": "Spec to control the desired behavior of daemon set rolling update.", + "type": "object", + "properties": { + "maxSurge": { + "description": "The maximum number of nodes with an existing available DaemonSet pod that can have an updated DaemonSet pod during an update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up to a minimum of 1. Default value is 0. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have a new pod created before the old pod is marked as deleted. The update starts by launching new pods on 30% of nodes. Once an updated pod is available (Ready for at least minReadySeconds) the old DaemonSet pod on that node is marked deleted.
If the old pod becomes unavailable for any reason (Ready transitions to false, is evicted, or is drained) an updated pod is immediately created on that node without considering surge limits. Allowing surge implies the possibility that the resources consumed by the daemonset on any given node can double if the readiness check fails, and so resource intensive daemonsets should take into account that they may cause evictions during disruption.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString" + }, + "maxUnavailable": { + "description": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0 if MaxSurge is 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString" + } + } + }, + "io.k8s.api.apps.v1.RollingUpdateDeployment": { + "description": "Spec to control the desired behavior of rolling update.", + "type": "object", + "properties": { + "maxSurge": { + "description": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods does not exceed 130% of desired pods. Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString" + }, + "maxUnavailable": { + "description": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old ReplicaSet can be scaled down further, followed by scaling up the new ReplicaSet, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString" + } + } + }, + "io.k8s.api.apps.v1.RollingUpdateStatefulSetStrategy": { + "description": "RollingUpdateStatefulSetStrategy is used to communicate parameters for RollingUpdateStatefulSetStrategyType.", + "type": "object", + "properties": { + "maxUnavailable": { + "description": "The maximum number of pods that can be unavailable during the update.
Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding up. This can not be 0. Defaults to 1. This field is alpha-level and is only honored by servers that enable the MaxUnavailableStatefulSet feature. The field applies to all pods in the range 0 to Replicas-1. That means if there is any unavailable pod in the range 0 to Replicas-1, it will be counted towards MaxUnavailable.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString" + }, + "partition": { + "description": "Partition indicates the ordinal at which the StatefulSet should be partitioned for updates. During a rolling update, all pods from ordinal Replicas-1 to Partition are updated. All pods from ordinal Partition-1 to 0 remain untouched. This is helpful in being able to do a canary based deployment. The default value is 0.", + "type": "integer", + "format": "int32" + } + } + }, + "io.k8s.api.apps.v1.StatefulSet": { + "description": "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\n\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "Spec defines the desired identities of pods in this set.", + "$ref": "#/definitions/io.k8s.api.apps.v1.StatefulSetSpec" + }, + "status": { + "description": "Status is the current status of Pods in this StatefulSet. 
This data may be out of date by some window of time.", + "$ref": "#/definitions/io.k8s.api.apps.v1.StatefulSetStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "apps", + "kind": "StatefulSet", + "version": "v1" + } + ] + }, + "io.k8s.api.apps.v1.StatefulSetCondition": { + "description": "StatefulSetCondition describes the state of a statefulset at a certain point.", + "type": "object", + "required": [ + "type", + "status" + ], + "properties": { + "lastTransitionTime": { + "description": "Last time the condition transitioned from one status to another.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "message": { + "description": "A human readable message indicating details about the transition.", + "type": "string" + }, + "reason": { + "description": "The reason for the condition's last transition.", + "type": "string" + }, + "status": { + "description": "Status of the condition, one of True, False, Unknown.", + "type": "string" + }, + "type": { + "description": "Type of statefulset condition.", + "type": "string" + } + } + }, + "io.k8s.api.apps.v1.StatefulSetList": { + "description": "StatefulSetList is a collection of StatefulSets.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is the list of stateful sets.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.apps.v1.StatefulSet" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "apps", + "kind": "StatefulSetList", + "version": "v1" + } + ] + }, + "io.k8s.api.apps.v1.StatefulSetOrdinals": { + "description": "StatefulSetOrdinals describes the policy used for replica ordinal assignment in this StatefulSet.", + "type": "object", + "properties": { + "start": { + "description": "start is the number representing the first replica's index. It may be used to number replicas from an alternate index (eg: 1-indexed) over the default 0-indexed names, or to orchestrate progressive movement of replicas from one StatefulSet to another. If set, replica indices will be in the range:\n [.spec.ordinals.start, .spec.ordinals.start + .spec.replicas).\nIf unset, defaults to 0. 
Replica indices will be in the range:\n [0, .spec.replicas).", + "type": "integer", + "format": "int32" + } + } + }, + "io.k8s.api.apps.v1.StatefulSetPersistentVolumeClaimRetentionPolicy": { + "description": "StatefulSetPersistentVolumeClaimRetentionPolicy describes the policy used for PVCs created from the StatefulSet VolumeClaimTemplates.", + "type": "object", + "properties": { + "whenDeleted": { + "description": "WhenDeleted specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is deleted. The default policy of `Retain` causes PVCs to not be affected by StatefulSet deletion. The `Delete` policy causes those PVCs to be deleted.", + "type": "string" + }, + "whenScaled": { + "description": "WhenScaled specifies what happens to PVCs created from StatefulSet VolumeClaimTemplates when the StatefulSet is scaled down. The default policy of `Retain` causes PVCs to not be affected by a scaledown. The `Delete` policy causes the associated PVCs for any excess pods above the replica count to be deleted.", + "type": "string" + } + } + }, + "io.k8s.api.apps.v1.StatefulSetSpec": { + "description": "A StatefulSetSpec is the specification of a StatefulSet.", + "type": "object", + "required": [ + "selector", + "template", + "serviceName" + ], + "properties": { + "minReadySeconds": { + "description": "Minimum number of seconds for which a newly created pod should be ready without any of its containers crashing for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "type": "integer", + "format": "int32" + }, + "ordinals": { + "description": "ordinals controls the numbering of replica indices in a StatefulSet. The default ordinals behavior assigns a \"0\" index to the first replica and increments the index by one for each additional replica requested. Using the ordinals field requires the StatefulSetStartOrdinal feature gate to be enabled, which is beta.", + "$ref": "#/definitions/io.k8s.api.apps.v1.StatefulSetOrdinals" + }, + "persistentVolumeClaimRetentionPolicy": { + "description": "persistentVolumeClaimRetentionPolicy describes the lifecycle of persistent volume claims created from volumeClaimTemplates. By default, all persistent volume claims are created as needed and retained until manually deleted. This policy allows the lifecycle to be altered, for example by deleting persistent volume claims when their stateful set is deleted, or when their pod is scaled down. This requires the StatefulSetAutoDeletePVC feature gate to be enabled, which is alpha.", + "$ref": "#/definitions/io.k8s.api.apps.v1.StatefulSetPersistentVolumeClaimRetentionPolicy" + }, + "podManagementPolicy": { + "description": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", + "type": "string" + }, + "replicas": { + "description": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. 
If unspecified, defaults to 1.", + "type": "integer", + "format": "int32" + }, + "revisionHistoryLimit": { + "description": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", + "type": "integer", + "format": "int32" + }, + "selector": { + "description": "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + }, + "serviceName": { + "description": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", + "type": "string" + }, + "template": { + "description": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet. Each pod will be named with the format \u003cstatefulsetname\u003e-\u003cpodindex\u003e. For example, a pod in a StatefulSet named \"web\" with index number \"3\" would be named \"web-3\". The only allowed template.spec.restartPolicy value is \"Always\".", + "$ref": "#/definitions/io.k8s.api.core.v1.PodTemplateSpec" + }, + "updateStrategy": { + "description": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", + "$ref": "#/definitions/io.k8s.api.apps.v1.StatefulSetUpdateStrategy" + }, + "volumeClaimTemplates": { + "description": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeClaim" + } + } + } + }, + "io.k8s.api.apps.v1.StatefulSetStatus": { + "description": "StatefulSetStatus represents the current state of a StatefulSet.", + "type": "object", + "required": [ + "replicas" + ], + "properties": { + "availableReplicas": { + "description": "Total number of available pods (ready for at least minReadySeconds) targeted by this statefulset.", + "type": "integer", + "format": "int32" + }, + "collisionCount": { + "description": "collisionCount is the count of hash collisions for the StatefulSet. 
The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + "type": "integer", + "format": "int32" + }, + "conditions": { + "description": "Represents the latest available observations of a statefulset's current state.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.apps.v1.StatefulSetCondition" + }, + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "currentReplicas": { + "description": "currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision.", + "type": "integer", + "format": "int32" + }, + "currentRevision": { + "description": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).", + "type": "string" + }, + "observedGeneration": { + "description": "observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server.", + "type": "integer", + "format": "int64" + }, + "readyReplicas": { + "description": "readyReplicas is the number of pods created for this StatefulSet with a Ready Condition.", + "type": "integer", + "format": "int32" + }, + "replicas": { + "description": "replicas is the number of Pods created by the StatefulSet controller.", + "type": "integer", + "format": "int32" + }, + "updateRevision": { + "description": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", + "type": "string" + }, + "updatedReplicas": { + "description": "updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision.", + "type": "integer", + "format": "int32" + } + } + }, + "io.k8s.api.apps.v1.StatefulSetUpdateStrategy": { + "description": "StatefulSetUpdateStrategy indicates the strategy that the StatefulSet controller will use to perform updates. It includes any additional parameters necessary to perform the update for the indicated strategy.", + "type": "object", + "properties": { + "rollingUpdate": { + "description": "RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.", + "$ref": "#/definitions/io.k8s.api.apps.v1.RollingUpdateStatefulSetStrategy" + }, + "type": { + "description": "Type indicates the type of the StatefulSetUpdateStrategy. Default is RollingUpdate.", + "type": "string" + } + } + }, + "io.k8s.api.authentication.v1.BoundObjectReference": { + "description": "BoundObjectReference is a reference to an object that a token is bound to.", + "type": "object", + "properties": { + "apiVersion": { + "description": "API version of the referent.", + "type": "string" + }, + "kind": { + "description": "Kind of the referent. Valid kinds are 'Pod' and 'Secret'.", + "type": "string" + }, + "name": { + "description": "Name of the referent.", + "type": "string" + }, + "uid": { + "description": "UID of the referent.", + "type": "string" + } + } + }, + "io.k8s.api.authentication.v1.SelfSubjectReview": { + "description": "SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. When using impersonation, users will receive the user info of the user being impersonated. 
If impersonation or request header authentication is used, any extra keys will have their case ignored and returned as lowercase.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "status": { + "description": "Status is filled in by the server with the user attributes.", + "$ref": "#/definitions/io.k8s.api.authentication.v1.SelfSubjectReviewStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "authentication.k8s.io", + "kind": "SelfSubjectReview", + "version": "v1" + } + ] + }, + "io.k8s.api.authentication.v1.SelfSubjectReviewStatus": { + "description": "SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user.", + "type": "object", + "properties": { + "userInfo": { + "description": "User attributes of the user making this request.", + "$ref": "#/definitions/io.k8s.api.authentication.v1.UserInfo" + } + } + }, + "io.k8s.api.authentication.v1.TokenRequest": { + "description": "TokenRequest requests a token for a given service account.", + "type": "object", + "required": [ + "spec" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "Spec holds information about the request being evaluated", + "$ref": "#/definitions/io.k8s.api.authentication.v1.TokenRequestSpec" + }, + "status": { + "description": "Status is filled in by the server and indicates whether the token can be authenticated.", + "$ref": "#/definitions/io.k8s.api.authentication.v1.TokenRequestStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "authentication.k8s.io", + "kind": "TokenRequest", + "version": "v1" + } + ] + }, + "io.k8s.api.authentication.v1.TokenRequestSpec": { + "description": "TokenRequestSpec contains client provided parameters of a token request.", + "type": "object", + "required": [ + "audiences" + ], + "properties": { + "audiences": { + "description": "Audiences are the intended audiences of the token. A recipient of a token must identify themselves with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences.", + "type": "array", + "items": { + "type": "string" + } + }, + "boundObjectRef": { + "description": "BoundObjectRef is a reference to an object that the token will be bound to. The token will only be valid for as long as the bound object exists. NOTE: The API server's TokenReview endpoint will validate the BoundObjectRef, but other audiences may not. Keep ExpirationSeconds small if you want prompt revocation.", + "$ref": "#/definitions/io.k8s.api.authentication.v1.BoundObjectReference" + }, + "expirationSeconds": { + "description": "ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response.", + "type": "integer", + "format": "int64" + } + } + }, + "io.k8s.api.authentication.v1.TokenRequestStatus": { + "description": "TokenRequestStatus is the result of a token request.", + "type": "object", + "required": [ + "token", + "expirationTimestamp" + ], + "properties": { + "expirationTimestamp": { + "description": "ExpirationTimestamp is the time of expiration of the returned token.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "token": { + "description": "Token is the opaque bearer token.", + "type": "string" + } + } + }, + "io.k8s.api.authentication.v1.TokenReview": { + "description": "TokenReview attempts to authenticate a token to a known user. Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.", + "type": "object", + "required": [ + "spec" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "Spec holds information about the request being evaluated", + "$ref": "#/definitions/io.k8s.api.authentication.v1.TokenReviewSpec" + }, + "status": { + "description": "Status is filled in by the server and indicates whether the request can be authenticated.", + "$ref": "#/definitions/io.k8s.api.authentication.v1.TokenReviewStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "authentication.k8s.io", + "kind": "TokenReview", + "version": "v1" + } + ] + }, + "io.k8s.api.authentication.v1.TokenReviewSpec": { + "description": "TokenReviewSpec is a description of the token authentication request.", + "type": "object", + "properties": { + "audiences": { + "description": "Audiences is a list of the identifiers that the resource server presented with the token identifies as. Audience-aware token authenticators will verify that the token was intended for at least one of the audiences in this list. If no audiences are provided, the audience will default to the audience of the Kubernetes apiserver.", + "type": "array", + "items": { + "type": "string" + } + }, + "token": { + "description": "Token is the opaque bearer token.", + "type": "string" + } + } + }, + "io.k8s.api.authentication.v1.TokenReviewStatus": { + "description": "TokenReviewStatus is the result of the token authentication request.", + "type": "object", + "properties": { + "audiences": { + "description": "Audiences are audience identifiers chosen by the authenticator that are compatible with both the TokenReview and token. An identifier is any identifier in the intersection of the TokenReviewSpec audiences and the token's audiences. A client of the TokenReview API that sets the spec.audiences field should validate that a compatible audience identifier is returned in the status.audiences field to ensure that the TokenReview server is audience aware. If a TokenReview returns an empty status.audiences field where status.authenticated is \"true\", the token is valid against the audience of the Kubernetes API server.", + "type": "array", + "items": { + "type": "string" + } + }, + "authenticated": { + "description": "Authenticated indicates that the token was associated with a known user.", + "type": "boolean" + }, + "error": { + "description": "Error indicates that the token couldn't be checked.", + "type": "string" + }, + "user": { + "description": "User is the UserInfo associated with the provided token.", + "$ref": "#/definitions/io.k8s.api.authentication.v1.UserInfo" + } + } + }, + "io.k8s.api.authentication.v1.UserInfo": { + "description": "UserInfo holds the information about the user needed to implement the user.Info interface.", + "type": "object", + "properties": { + "extra": { + "description": "Any additional information provided by the authenticator.", + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "groups": { + "description": "The names of groups this user is a part of.", + "type": "array", + "items": { + "type": "string" + } + }, + "uid": { + "description": "A unique value that identifies this user across time. 
If this user is deleted and another user by the same name is added, they will have different UIDs.", + "type": "string" + }, + "username": { + "description": "The name that uniquely identifies this user among all active users.", + "type": "string" + } + } + }, + "io.k8s.api.authentication.v1alpha1.SelfSubjectReview": { + "description": "SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. When using impersonation, users will receive the user info of the user being impersonated. If impersonation or request header authentication is used, any extra keys will have their case ignored and returned as lowercase.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "status": { + "description": "Status is filled in by the server with the user attributes.", + "$ref": "#/definitions/io.k8s.api.authentication.v1alpha1.SelfSubjectReviewStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "authentication.k8s.io", + "kind": "SelfSubjectReview", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.authentication.v1alpha1.SelfSubjectReviewStatus": { + "description": "SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user.", + "type": "object", + "properties": { + "userInfo": { + "description": "User attributes of the user making this request.", + "$ref": "#/definitions/io.k8s.api.authentication.v1.UserInfo" + } + } + }, + "io.k8s.api.authentication.v1beta1.SelfSubjectReview": { + "description": "SelfSubjectReview contains the user information that the kube-apiserver has about the user making this request. When using impersonation, users will receive the user info of the user being impersonated. If impersonation or request header authentication is used, any extra keys will have their case ignored and returned as lowercase.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "status": { + "description": "Status is filled in by the server with the user attributes.", + "$ref": "#/definitions/io.k8s.api.authentication.v1beta1.SelfSubjectReviewStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "authentication.k8s.io", + "kind": "SelfSubjectReview", + "version": "v1beta1" + } + ] + }, + "io.k8s.api.authentication.v1beta1.SelfSubjectReviewStatus": { + "description": "SelfSubjectReviewStatus is filled by the kube-apiserver and sent back to a user.", + "type": "object", + "properties": { + "userInfo": { + "description": "User attributes of the user making this request.", + "$ref": "#/definitions/io.k8s.api.authentication.v1.UserInfo" + } + } + }, + "io.k8s.api.authorization.v1.LocalSubjectAccessReview": { + "description": "LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking.", + "type": "object", + "required": [ + "spec" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace you made the request against. 
If empty, it is defaulted.", + "$ref": "#/definitions/io.k8s.api.authorization.v1.SubjectAccessReviewSpec" + }, + "status": { + "description": "Status is filled in by the server and indicates whether the request is allowed or not", + "$ref": "#/definitions/io.k8s.api.authorization.v1.SubjectAccessReviewStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "authorization.k8s.io", + "kind": "LocalSubjectAccessReview", + "version": "v1" + } + ] + }, + "io.k8s.api.authorization.v1.NonResourceAttributes": { + "description": "NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface", + "type": "object", + "properties": { + "path": { + "description": "Path is the URL path of the request", + "type": "string" + }, + "verb": { + "description": "Verb is the standard HTTP verb", + "type": "string" + } + } + }, + "io.k8s.api.authorization.v1.NonResourceRule": { + "description": "NonResourceRule holds information that describes a rule for the non-resource", + "type": "object", + "required": [ + "verbs" + ], + "properties": { + "nonResourceURLs": { + "description": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path. \"*\" means all.", + "type": "array", + "items": { + "type": "string" + } + }, + "verbs": { + "description": "Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. \"*\" means all.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "io.k8s.api.authorization.v1.ResourceAttributes": { + "description": "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface", + "type": "object", + "properties": { + "group": { + "description": "Group is the API Group of the Resource. \"*\" means all.", + "type": "string" + }, + "name": { + "description": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.", + "type": "string" + }, + "namespace": { + "description": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces. \"\" (empty) is defaulted for LocalSubjectAccessReviews; \"\" (empty) is empty for cluster-scoped resources; \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview.", + "type": "string" + }, + "resource": { + "description": "Resource is one of the existing resource types. \"*\" means all.", + "type": "string" + }, + "subresource": { + "description": "Subresource is one of the existing resource types. \"\" means none.", + "type": "string" + }, + "verb": { + "description": "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", + "type": "string" + }, + "version": { + "description": "Version is the API Version of the Resource. \"*\" means all.", + "type": "string" + } + } + }, + "io.k8s.api.authorization.v1.ResourceRule": { + "description": "ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.", + "type": "object", + "required": [ + "verbs" + ], + "properties": { + "apiGroups": { + "description": "APIGroups is the name of the APIGroup that contains the resources. 
If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \"*\" means all.", + "type": "array", + "items": { + "type": "string" + } + }, + "resourceNames": { + "description": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. \"*\" means all.", + "type": "array", + "items": { + "type": "string" + } + }, + "resources": { + "description": "Resources is a list of resources this rule applies to. \"*\" means all in the specified apiGroups.\n \"*/foo\" represents the subresource 'foo' for all resources in the specified apiGroups.", + "type": "array", + "items": { + "type": "string" + } + }, + "verbs": { + "description": "Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "io.k8s.api.authorization.v1.SelfSubjectAccessReview": { + "description": "SelfSubjectAccessReview checks whether or not the current user can perform an action. Not filling in a spec.namespace means \"in all namespaces\". Self is a special case, because users should always be able to check whether they can perform an action", + "type": "object", + "required": [ + "spec" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "Spec holds information about the request being evaluated. user and groups must be empty", + "$ref": "#/definitions/io.k8s.api.authorization.v1.SelfSubjectAccessReviewSpec" + }, + "status": { + "description": "Status is filled in by the server and indicates whether the request is allowed or not", + "$ref": "#/definitions/io.k8s.api.authorization.v1.SubjectAccessReviewStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "authorization.k8s.io", + "kind": "SelfSubjectAccessReview", + "version": "v1" + } + ] + }, + "io.k8s.api.authorization.v1.SelfSubjectAccessReviewSpec": { + "description": "SelfSubjectAccessReviewSpec is a description of the access request. 
Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set", + "type": "object", + "properties": { + "nonResourceAttributes": { + "description": "NonResourceAttributes describes information for a non-resource access request", + "$ref": "#/definitions/io.k8s.api.authorization.v1.NonResourceAttributes" + }, + "resourceAttributes": { + "description": "ResourceAuthorizationAttributes describes information for a resource access request", + "$ref": "#/definitions/io.k8s.api.authorization.v1.ResourceAttributes" + } + } + }, + "io.k8s.api.authorization.v1.SelfSubjectRulesReview": { + "description": "SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. The returned list of actions may be incomplete depending on the server's authorization mode, and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions, or to quickly let an end user reason about their permissions. It should NOT be used by external systems to drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. SubjectAccessReview and LocalSubjectAccessReview are the correct way to defer authorization decisions to the API server.", + "type": "object", + "required": [ + "spec" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "Spec holds information about the request being evaluated.", + "$ref": "#/definitions/io.k8s.api.authorization.v1.SelfSubjectRulesReviewSpec" + }, + "status": { + "description": "Status is filled in by the server and indicates the set of actions a user can perform.", + "$ref": "#/definitions/io.k8s.api.authorization.v1.SubjectRulesReviewStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "authorization.k8s.io", + "kind": "SelfSubjectRulesReview", + "version": "v1" + } + ] + }, + "io.k8s.api.authorization.v1.SelfSubjectRulesReviewSpec": { + "description": "SelfSubjectRulesReviewSpec defines the specification for SelfSubjectRulesReview.", + "type": "object", + "properties": { + "namespace": { + "description": "Namespace to evaluate rules for. Required.", + "type": "string" + } + } + }, + "io.k8s.api.authorization.v1.SubjectAccessReview": { + "description": "SubjectAccessReview checks whether or not a user or group can perform an action.", + "type": "object", + "required": [ + "spec" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "Spec holds information about the request being evaluated", + "$ref": "#/definitions/io.k8s.api.authorization.v1.SubjectAccessReviewSpec" + }, + "status": { + "description": "Status is filled in by the server and indicates whether the request is allowed or not", + "$ref": "#/definitions/io.k8s.api.authorization.v1.SubjectAccessReviewStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "authorization.k8s.io", + "kind": "SubjectAccessReview", + "version": "v1" + } + ] + }, + "io.k8s.api.authorization.v1.SubjectAccessReviewSpec": { + "description": "SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set", + "type": "object", + "properties": { + "extra": { + "description": "Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here.", + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "groups": { + "description": "Groups is the groups you're testing for.", + "type": "array", + "items": { + "type": "string" + } + }, + "nonResourceAttributes": { + "description": "NonResourceAttributes describes information for a non-resource access request", + "$ref": "#/definitions/io.k8s.api.authorization.v1.NonResourceAttributes" + }, + "resourceAttributes": { + "description": "ResourceAuthorizationAttributes describes information for a resource access request", + "$ref": "#/definitions/io.k8s.api.authorization.v1.ResourceAttributes" + }, + "uid": { + "description": "UID information about the requesting user.", + "type": "string" + }, + "user": { + "description": "User is the user you're testing for. If you specify \"User\" but not \"Groups\", then it is interpreted as \"What if User were not a member of any groups?\"", + "type": "string" + } + } + }, + "io.k8s.api.authorization.v1.SubjectAccessReviewStatus": { + "description": "SubjectAccessReviewStatus", + "type": "object", + "required": [ + "allowed" + ], + "properties": { + "allowed": { + "description": "Allowed is required. True if the action would be allowed, false otherwise.", + "type": "boolean" + }, + "denied": { + "description": "Denied is optional. True if the action would be denied, otherwise false. If both allowed is false and denied is false, then the authorizer has no opinion on whether to authorize the action. Denied may not be true if Allowed is true.", + "type": "boolean" + }, + "evaluationError": { + "description": "EvaluationError is an indication that some error occurred during the authorization check. 
It is entirely possible to get an error and be able to continue to determine authorization status in spite of it. For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request.", + "type": "string" + }, + "reason": { + "description": "Reason is optional. It indicates why a request was allowed or denied.", + "type": "string" + } + } + }, + "io.k8s.api.authorization.v1.SubjectRulesReviewStatus": { + "description": "SubjectRulesReviewStatus contains the result of a rules check. This check can be incomplete depending on the set of authorizers the server is configured with and any errors experienced during evaluation. Because authorization rules are additive, if a rule appears in a list it's safe to assume the subject has that permission, even if that list is incomplete.", + "type": "object", + "required": [ + "resourceRules", + "nonResourceRules", + "incomplete" + ], + "properties": { + "evaluationError": { + "description": "EvaluationError can appear in combination with Rules. It indicates an error occurred during rule evaluation, such as an authorizer that doesn't support rule evaluation, and that ResourceRules and/or NonResourceRules may be incomplete.", + "type": "string" + }, + "incomplete": { + "description": "Incomplete is true when the rules returned by this call are incomplete. This is most commonly encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation.", + "type": "boolean" + }, + "nonResourceRules": { + "description": "NonResourceRules is the list of actions the subject is allowed to perform on non-resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.authorization.v1.NonResourceRule" + } + }, + "resourceRules": { + "description": "ResourceRules is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.authorization.v1.ResourceRule" + } + } + } + }, + "io.k8s.api.autoscaling.v1.CrossVersionObjectReference": { + "description": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", + "type": "object", + "required": [ + "kind", + "name" + ], + "properties": { + "apiVersion": { + "description": "apiVersion is the API version of the referent", + "type": "string" + }, + "kind": { + "description": "kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "name": { + "description": "name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": "string" + } + }, + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler": { + "description": "configuration of a horizontal pod autoscaler.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "spec defines the behaviour of the autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "$ref": "#/definitions/io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerSpec" + }, + "status": { + "description": "status is the current information about the autoscaler.", + "$ref": "#/definitions/io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "autoscaling", + "kind": "HorizontalPodAutoscaler", + "version": "v1" + } + ] + }, + "io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerList": { + "description": "list of horizontal pod autoscaler objects.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of horizontal pod autoscaler objects.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v1.HorizontalPodAutoscaler" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "autoscaling", + "kind": "HorizontalPodAutoscalerList", + "version": "v1" + } + ] + }, + "io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerSpec": { + "description": "specification of a horizontal pod autoscaler.", + "type": "object", + "required": [ + "scaleTargetRef", + "maxReplicas" + ], + "properties": { + "maxReplicas": { + "description": "maxReplicas is the upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.", + "type": "integer", + "format": "int32" + }, + "minReplicas": { + "description": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. 
Scaling is active as long as at least one metric value is available.", + "type": "integer", + "format": "int32" + }, + "scaleTargetRef": { + "description": "reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption and will set the desired number of pods by using its Scale subresource.", + "$ref": "#/definitions/io.k8s.api.autoscaling.v1.CrossVersionObjectReference" + }, + "targetCPUUtilizationPercentage": { + "description": "targetCPUUtilizationPercentage is the target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used.", + "type": "integer", + "format": "int32" + } + } + }, + "io.k8s.api.autoscaling.v1.HorizontalPodAutoscalerStatus": { + "description": "current status of a horizontal pod autoscaler", + "type": "object", + "required": [ + "currentReplicas", + "desiredReplicas" + ], + "properties": { + "currentCPUUtilizationPercentage": { + "description": "currentCPUUtilizationPercentage is the current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 70 means that an average pod is now using 70% of its requested CPU.", + "type": "integer", + "format": "int32" + }, + "currentReplicas": { + "description": "currentReplicas is the current number of replicas of pods managed by this autoscaler.", + "type": "integer", + "format": "int32" + }, + "desiredReplicas": { + "description": "desiredReplicas is the desired number of replicas of pods managed by this autoscaler.", + "type": "integer", + "format": "int32" + }, + "lastScaleTime": { + "description": "lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "observedGeneration": { + "description": "observedGeneration is the most recent generation observed by this autoscaler.", + "type": "integer", + "format": "int64" + } + } + }, + "io.k8s.api.autoscaling.v1.Scale": { + "description": "Scale represents a scaling request for a resource.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "spec defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "$ref": "#/definitions/io.k8s.api.autoscaling.v1.ScaleSpec" + }, + "status": { + "description": "status is the current status of the scale. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", + "$ref": "#/definitions/io.k8s.api.autoscaling.v1.ScaleStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "autoscaling", + "kind": "Scale", + "version": "v1" + } + ] + }, + "io.k8s.api.autoscaling.v1.ScaleSpec": { + "description": "ScaleSpec describes the attributes of a scale subresource.", + "type": "object", + "properties": { + "replicas": { + "description": "replicas is the desired number of instances for the scaled object.", + "type": "integer", + "format": "int32" + } + } + }, + "io.k8s.api.autoscaling.v1.ScaleStatus": { + "description": "ScaleStatus represents the current status of a scale subresource.", + "type": "object", + "required": [ + "replicas" + ], + "properties": { + "replicas": { + "description": "replicas is the actual number of observed instances of the scaled object.", + "type": "integer", + "format": "int32" + }, + "selector": { + "description": "selector is the label query over pods that should match the replicas count. This is the same as the label selector but in the string format to avoid introspection by clients. The string will be in the same format as the query-param syntax. More info about label selectors: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/", + "type": "string" + } + } + }, + "io.k8s.api.autoscaling.v2.ContainerResourceMetricSource": { + "description": "ContainerResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.", + "type": "object", + "required": [ + "name", + "target", + "container" + ], + "properties": { + "container": { + "description": "container is the name of the container in the pods of the scaling target", + "type": "string" + }, + "name": { + "description": "name is the name of the resource in question.", + "type": "string" + }, + "target": { + "description": "target specifies the target value for the given metric", + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricTarget" + } + } + }, + "io.k8s.api.autoscaling.v2.ContainerResourceMetricStatus": { + "description": "ContainerResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing a single container in each pod in the current scale target (e.g. CPU or memory). 
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "type": "object", + "required": [ + "name", + "current", + "container" + ], + "properties": { + "container": { + "description": "container is the name of the container in the pods of the scaling target", + "type": "string" + }, + "current": { + "description": "current contains the current value for the given metric", + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricValueStatus" + }, + "name": { + "description": "name is the name of the resource in question.", + "type": "string" + } + } + }, + "io.k8s.api.autoscaling.v2.CrossVersionObjectReference": { + "description": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", + "type": "object", + "required": [ + "kind", + "name" + ], + "properties": { + "apiVersion": { + "description": "apiVersion is the API version of the referent", + "type": "string" + }, + "kind": { + "description": "kind is the kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "name": { + "description": "name is the name of the referent; More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": "string" + } + } + }, + "io.k8s.api.autoscaling.v2.ExternalMetricSource": { + "description": "ExternalMetricSource indicates how to scale on a metric not associated with any Kubernetes object (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", + "type": "object", + "required": [ + "metric", + "target" + ], + "properties": { + "metric": { + "description": "metric identifies the target metric by name and selector", + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricIdentifier" + }, + "target": { + "description": "target specifies the target value for the given metric", + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricTarget" + } + } + }, + "io.k8s.api.autoscaling.v2.ExternalMetricStatus": { + "description": "ExternalMetricStatus indicates the current value of a global metric not associated with any Kubernetes object.", + "type": "object", + "required": [ + "metric", + "current" + ], + "properties": { + "current": { + "description": "current contains the current value for the given metric", + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricValueStatus" + }, + "metric": { + "description": "metric identifies the target metric by name and selector", + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricIdentifier" + } + } + }, + "io.k8s.api.autoscaling.v2.HPAScalingPolicy": { + "description": "HPAScalingPolicy is a single policy which must hold true for a specified past interval.", + "type": "object", + "required": [ + "type", + "value", + "periodSeconds" + ], + "properties": { + "periodSeconds": { + "description": "periodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).", + "type": "integer", + "format": "int32" + }, + "type": { + "description": "type is used to specify the scaling policy.", + "type": "string" + }, + "value": { + "description": "value contains the amount of change which is permitted by the policy. 
It must be greater than zero", + "type": "integer", + "format": "int32" + } + } + }, + "io.k8s.api.autoscaling.v2.HPAScalingRules": { + "description": "HPAScalingRules configures the scaling behavior for one direction. These Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly; instead, the safest value from the stabilization window is chosen.", + "type": "object", + "properties": { + "policies": { + "description": "policies is a list of potential scaling policies which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.HPAScalingPolicy" + }, + "x-kubernetes-list-type": "atomic" + }, + "selectPolicy": { + "description": "selectPolicy is used to specify which policy should be used. If not set, the default value Max is used.", + "type": "string" + }, + "stabilizationWindowSeconds": { + "description": "stabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).", + "type": "integer", + "format": "int32" + } + } + }, + "io.k8s.api.autoscaling.v2.HorizontalPodAutoscaler": { + "description": "HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "metadata is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "spec is the specification for the behaviour of the autoscaler. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerSpec" + }, + "status": { + "description": "status is the current information about the autoscaler.", + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "autoscaling", + "kind": "HorizontalPodAutoscaler", + "version": "v2" + } + ] + }, + "io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerBehavior": { + "description": "HorizontalPodAutoscalerBehavior configures the scaling behavior of the target in both Up and Down directions (scaleUp and scaleDown fields respectively).", + "type": "object", + "properties": { + "scaleDown": { + "description": "scaleDown is the scaling policy for scaling Down. If not set, the default value allows scaling down to minReplicas pods, with a 300 second stabilization window (i.e., the highest recommendation for the last 300sec is used).", + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.HPAScalingRules" + }, + "scaleUp": { + "description": "scaleUp is the scaling policy for scaling Up. If not set, the default value is the higher of:\n * increase no more than 4 pods per 60 seconds\n * double the number of pods per 60 seconds\nNo stabilization is used.", + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.HPAScalingRules" + } + } + }, + "io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerCondition": { + "description": "HorizontalPodAutoscalerCondition describes the state of a HorizontalPodAutoscaler at a certain point.", + "type": "object", + "required": [ + "type", + "status" + ], + "properties": { + "lastTransitionTime": { + "description": "lastTransitionTime is the last time the condition transitioned from one status to another", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "message": { + "description": "message is a human-readable explanation containing details about the transition", + "type": "string" + }, + "reason": { + "description": "reason is the reason for the condition's last transition.", + "type": "string" + }, + "status": { + "description": "status is the status of the condition (True, False, Unknown)", + "type": "string" + }, + "type": { + "description": "type describes the current condition", + "type": "string" + } + } + }, + "io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerList": { + "description": "HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of horizontal pod autoscaler objects.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.HorizontalPodAutoscaler" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "metadata is the standard list metadata.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "autoscaling", + "kind": "HorizontalPodAutoscalerList", + "version": "v2" + } + ] + }, + "io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerSpec": { + "description": "HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.", + "type": "object", + "required": [ + "scaleTargetRef", + "maxReplicas" + ], + "properties": { + "behavior": { + "description": "behavior configures the scaling behavior of the target in both Up and Down directions (scaleUp and scaleDown fields respectively). If not set, the default HPAScalingRules for scale up and scale down are used.", + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerBehavior" + }, + "maxReplicas": { + "description": "maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. It cannot be less than minReplicas.", + "type": "integer", + "format": "int32" + }, + "metrics": { + "description": "metrics contains the specifications used to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated by multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. See the individual metric source types for more information about how each type of metric must respond. If not set, the default metric will be set to 80% average CPU utilization.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricSpec" + }, + "x-kubernetes-list-type": "atomic" + }, + "minReplicas": { + "description": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.", + "type": "integer", + "format": "int32" + }, + "scaleTargetRef": { + "description": "scaleTargetRef points to the target resource to scale, and is used to identify the pods for which metrics should be collected, as well as to actually change the replica count.", + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.CrossVersionObjectReference" + } + } + }, + "io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerStatus": { + "description": "HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler.", + "type": "object", + "required": [ + "desiredReplicas" + ], + "properties": { + "conditions": { + "description": "conditions is the set of conditions required for this autoscaler to scale its target, and indicates whether or not those conditions are met.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.HorizontalPodAutoscalerCondition" + }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "currentMetrics": { + "description": "currentMetrics is the last read state of the metrics used by this autoscaler.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricStatus" + }, + "x-kubernetes-list-type": "atomic" + }, + "currentReplicas": { + "description": "currentReplicas is the current number of replicas of pods managed by this autoscaler, as last seen by the autoscaler.", + "type": "integer", + "format": "int32" + }, + "desiredReplicas": { + "description": "desiredReplicas is the desired number of replicas of pods managed by this autoscaler, as last calculated by the autoscaler.", + "type": "integer", + "format": "int32" + }, + "lastScaleTime": { + "description": "lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, used by the autoscaler to control how often the number of pods is changed.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "observedGeneration": { + "description": "observedGeneration is the most recent generation observed by this autoscaler.", + "type": "integer", + "format": "int64" + } + } + }, + "io.k8s.api.autoscaling.v2.MetricIdentifier": { + "description": "MetricIdentifier defines the name and optionally selector for a metric", + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "description": "name is the name of the given metric", + "type": "string" + }, + "selector": { + "description": "selector is the string-encoded form of a standard kubernetes label selector for the given metric. When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + } + } + }, + "io.k8s.api.autoscaling.v2.MetricSpec": { + "description": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", + "type": "object", + "required": [ + "type" + ], + "properties": { + "containerResource": { + "description": "containerResource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod of the current scale target (e.g. CPU or memory).
Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. This is an alpha feature and can be enabled by the HPAContainerMetrics feature flag.", + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.ContainerResourceMetricSource" + }, + "external": { + "description": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.ExternalMetricSource" + }, + "object": { + "description": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.ObjectMetricSource" + }, + "pods": { + "description": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.PodsMetricSource" + }, + "resource": { + "description": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.ResourceMetricSource" + }, + "type": { + "description": "type is the type of metric source. It should be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object. Note: \"ContainerResource\" type is available when the feature-gate HPAContainerMetrics is enabled", + "type": "string" + } + } + }, + "io.k8s.api.autoscaling.v2.MetricStatus": { + "description": "MetricStatus describes the last-read state of a single metric.", + "type": "object", + "required": [ + "type" + ], + "properties": { + "containerResource": { + "description": "container resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing a single container in each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.ContainerResourceMetricStatus" + }, + "external": { + "description": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.ExternalMetricStatus" + }, + "object": { + "description": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.ObjectMetricStatus" + }, + "pods": { + "description": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.PodsMetricStatus" + }, + "resource": { + "description": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.ResourceMetricStatus" + }, + "type": { + "description": "type is the type of metric source. It will be one of \"ContainerResource\", \"External\", \"Object\", \"Pods\" or \"Resource\", each corresponding to a matching field in the object. Note: \"ContainerResource\" type is available when the feature-gate HPAContainerMetrics is enabled", + "type": "string" + } + } + }, + "io.k8s.api.autoscaling.v2.MetricTarget": { + "description": "MetricTarget defines the target value, average value, or average utilization of a specific metric", + "type": "object", + "required": [ + "type" + ], + "properties": { + "averageUtilization": { + "description": "averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. Currently only valid for Resource metric source type", + "type": "integer", + "format": "int32" + }, + "averageValue": { + "description": "averageValue is the target value of the average of the metric across all relevant pods (as a quantity)", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + }, + "type": { + "description": "type represents whether the metric type is Utilization, Value, or AverageValue", + "type": "string" + }, + "value": { + "description": "value is the target value of the metric (as a quantity).", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + } + } + }, + "io.k8s.api.autoscaling.v2.MetricValueStatus": { + "description": "MetricValueStatus holds the current value for a metric", + "type": "object", + "properties": { + "averageUtilization": { + "description": "averageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", + "type": "integer", + "format": "int32" + }, + "averageValue": { + "description": "averageValue is the current value of the average of the metric across all relevant pods (as a quantity)", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + }, + "value": { + "description": "value is the current value of the metric (as a quantity).", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + } + } + }, + "io.k8s.api.autoscaling.v2.ObjectMetricSource": { + "description": "ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).", + "type": "object", + "required": [ + "describedObject", + "target", + "metric" + ], + "properties": { + "describedObject": { + "description": "describedObject specifies the description of the object, such as kind, name, and apiVersion", + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.CrossVersionObjectReference" + }, + "metric": { + "description": "metric identifies the target metric by name and selector", + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricIdentifier" + }, + "target": { + "description": "target specifies the target value for the given metric", + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricTarget" + } + } + }, + "io.k8s.api.autoscaling.v2.ObjectMetricStatus": { + "description": "ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).", + "type": "object", + "required": [ + "metric", + "current", + "describedObject" + ], + "properties": { + "current": { + "description": "current contains the current value for the given metric", + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricValueStatus" + }, + "describedObject": { + "description": "describedObject specifies the description of the object, such as kind, name, and apiVersion", + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.CrossVersionObjectReference" + }, + "metric": { + "description": "metric identifies the target metric by name and selector", + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricIdentifier" + } + } + }, + "io.k8s.api.autoscaling.v2.PodsMetricSource": { + "description": "PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "type": "object", + "required": [ + "metric", + "target" + ], + "properties": { + "metric": { + "description": "metric identifies the target metric by name and selector", + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricIdentifier" + }, + "target": { + "description": "target specifies the target value for the given metric", + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricTarget" + } + } + }, + "io.k8s.api.autoscaling.v2.PodsMetricStatus": { + "description": "PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second).", + "type": "object", + "required": [ + "metric", + "current" + ], + "properties": { + "current": { + "description": "current contains the current value for the given metric", + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricValueStatus" + }, + "metric": { + "description": "metric identifies the target metric by name and selector", + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricIdentifier" + } + } + }, + "io.k8s.api.autoscaling.v2.ResourceMetricSource": { + "description": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.
Only one \"target\" type should be set.", + "type": "object", + "required": [ + "name", + "target" + ], + "properties": { + "name": { + "description": "name is the name of the resource in question.", + "type": "string" + }, + "target": { + "description": "target specifies the target value for the given metric", + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricTarget" + } + } + }, + "io.k8s.api.autoscaling.v2.ResourceMetricStatus": { + "description": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "type": "object", + "required": [ + "name", + "current" + ], + "properties": { + "current": { + "description": "current contains the current value for the given metric", + "$ref": "#/definitions/io.k8s.api.autoscaling.v2.MetricValueStatus" + }, + "name": { + "description": "name is the name of the resource in question.", + "type": "string" + } + } + }, + "io.k8s.api.batch.v1.CronJob": { + "description": "CronJob represents the configuration of a single cron job.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.batch.v1.CronJobSpec" + }, + "status": { + "description": "Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.batch.v1.CronJobStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "batch", + "kind": "CronJob", + "version": "v1" + } + ] + }, + "io.k8s.api.batch.v1.CronJobList": { + "description": "CronJobList is a collection of cron jobs.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of CronJobs.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.batch.v1.CronJob" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "batch", + "kind": "CronJobList", + "version": "v1" + } + ] + }, + "io.k8s.api.batch.v1.CronJobSpec": { + "description": "CronJobSpec describes how the job execution will look and when it will actually run.", + "type": "object", + "required": [ + "schedule", + "jobTemplate" + ], + "properties": { + "concurrencyPolicy": { + "description": "Specifies how to treat concurrent executions of a Job. Valid values are:\n\n- \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", + "type": "string" + }, + "failedJobsHistoryLimit": { + "description": "The number of failed finished jobs to retain. Value must be a non-negative integer. Defaults to 1.", + "type": "integer", + "format": "int32" + }, + "jobTemplate": { + "description": "Specifies the job that will be created when executing a CronJob.", + "$ref": "#/definitions/io.k8s.api.batch.v1.JobTemplateSpec" + }, + "schedule": { + "description": "The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", + "type": "string" + }, + "startingDeadlineSeconds": { + "description": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed job executions will be counted as failed ones.", + "type": "integer", + "format": "int64" + }, + "successfulJobsHistoryLimit": { + "description": "The number of successful finished jobs to retain. Value must be a non-negative integer. Defaults to 3.", + "type": "integer", + "format": "int32" + }, + "suspend": { + "description": "This flag tells the controller to suspend subsequent executions; it does not apply to already started executions. Defaults to false.", + "type": "boolean" + }, + "timeZone": { + "description": "The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will default to the time zone of the kube-controller-manager process. The set of valid time zone names and the time zone offset is loaded from the system-wide time zone database by the API server during CronJob validation and the controller manager during execution. If no system-wide time zone database can be found a bundled version of the database is used instead. If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host configuration, the controller will stop creating new Jobs and will create a system event with the reason UnknownTimeZone. More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones", + "type": "string" + } + } + }, + "io.k8s.api.batch.v1.CronJobStatus": { + "description": "CronJobStatus represents the current state of a cron job.", + "type": "object", + "properties": { + "active": { + "description": "A list of pointers to currently running jobs.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference" + }, + "x-kubernetes-list-type": "atomic" + }, + "lastScheduleTime": { + "description": "Information about when the job was last successfully scheduled.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "lastSuccessfulTime": { + "description": "Information about when the job last successfully completed.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + } + } + }, + "io.k8s.api.batch.v1.Job": { + "description": "Job represents the configuration of a single job.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "Specification of the desired behavior of a job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.batch.v1.JobSpec" + }, + "status": { + "description": "Current status of a job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.batch.v1.JobStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "batch", + "kind": "Job", + "version": "v1" + } + ] + }, + "io.k8s.api.batch.v1.JobCondition": { + "description": "JobCondition describes the current state of a job.", + "type": "object", + "required": [ + "type", + "status" + ], + "properties": { + "lastProbeTime": { + "description": "Last time the condition was checked.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "lastTransitionTime": { + "description": "Last time the condition transitioned from one status to another.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "message": { + "description": "Human readable message indicating details about last transition.", + "type": "string" + }, + "reason": { + "description": "(brief) reason for the condition's last transition.", + "type": "string" + }, + "status": { + "description": "Status of the condition, one of True, False, Unknown.", + "type": "string" + }, + "type": { + "description": "Type of job condition, Complete or Failed.", + "type": "string" + } + } + }, + "io.k8s.api.batch.v1.JobList": { + "description": "JobList is a collection of jobs.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of Jobs.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.batch.v1.Job" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "batch", + "kind": "JobList", + "version": "v1" + } + ] + }, + "io.k8s.api.batch.v1.JobSpec": { + "description": "JobSpec describes how the job execution will look.", + "type": "object", + "required": [ + "template" + ], + "properties": { + "activeDeadlineSeconds": { + "description": "Specifies the duration in seconds relative to the startTime that the job may be continuously active before the system tries to terminate it; value must be positive integer. If a Job is suspended (at creation or through an update), this timer will effectively be stopped and reset when the Job is resumed again.", + "type": "integer", + "format": "int64" + }, + "backoffLimit": { + "description": "Specifies the number of retries before marking this job failed. Defaults to 6", + "type": "integer", + "format": "int32" + }, + "backoffLimitPerIndex": { + "description": "Specifies the limit for the number of retries within an index before marking this index as failed.
When enabled, the number of failures per index is kept in the pod's batch.kubernetes.io/job-index-failure-count annotation. It can only be set when Job's completionMode=Indexed, and the Pod's restart policy is Never. The field is immutable. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", + "type": "integer", + "format": "int32" + }, + "completionMode": { + "description": "completionMode specifies how Pod completions are tracked. It can be `NonIndexed` (default) or `Indexed`.\n\n`NonIndexed` means that the Job is considered complete when there have been .spec.completions successfully completed Pods. Each Pod completion is homologous to each other.\n\n`Indexed` means that the Pods of a Job get an associated completion index from 0 to (.spec.completions - 1), available in the annotation batch.kubernetes.io/job-completion-index. The Job is considered complete when there is one successfully completed Pod for each index. When value is `Indexed`, .spec.completions must be specified and `.spec.parallelism` must be less than or equal to 10^5. In addition, the Pod name takes the form `$(job-name)-$(index)-$(random-string)`, the Pod hostname takes the form `$(job-name)-$(index)`.\n\nMore completion modes can be added in the future. If the Job controller observes a mode that it doesn't recognize, which is possible during upgrades due to version skew, the controller skips updates for the Job.", + "type": "string" + }, + "completions": { + "description": "Specifies the desired number of successfully finished pods the job should be run with. Setting to null means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "type": "integer", + "format": "int32" + }, + "manualSelector": { + "description": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system picks labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, you may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector", + "type": "boolean" + }, + "maxFailedIndexes": { + "description": "Specifies the maximal number of failed indexes before marking the Job as failed, when backoffLimitPerIndex is set. Once the number of failed indexes exceeds this number the entire Job is marked as Failed and its execution is terminated. When left as null the job continues execution of all of its indexes and is marked with the `Complete` Job condition. It can only be specified when backoffLimitPerIndex is set. It can be null or up to completions. It is required and must be less than or equal to 10^4 when completions is greater than 10^5. This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", + "type": "integer", + "format": "int32" + }, + "parallelism": { + "description": "Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) \u003c .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "type": "integer", + "format": "int32" + }, + "podFailurePolicy": { + "description": "Specifies the policy of handling failed pods. In particular, it allows specifying the set of actions and conditions which need to be satisfied to take the associated action. If empty, the default behaviour applies - the counter of failed pods, represented by the job's .status.failed field, is incremented and it is checked against the backoffLimit. This field cannot be used in combination with restartPolicy=OnFailure.\n\nThis field is beta-level. It can be used when the `JobPodFailurePolicy` feature gate is enabled (enabled by default).", + "$ref": "#/definitions/io.k8s.api.batch.v1.PodFailurePolicy" + }, + "podReplacementPolicy": { + "description": "podReplacementPolicy specifies when to create replacement Pods. Possible values are: - TerminatingOrFailed means that we recreate pods\n when they are terminating (has a metadata.deletionTimestamp) or failed.\n- Failed means to wait until a previously created Pod is fully terminated (has phase\n Failed or Succeeded) before creating a replacement Pod.\n\nWhen using podFailurePolicy, Failed is the only allowed value. TerminatingOrFailed and Failed are allowed values when podFailurePolicy is not in use. This is a beta field. To use this, enable the JobPodReplacementPolicy feature toggle. This is on by default.", + "type": "string" + }, + "selector": { + "description": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + }, + "suspend": { + "description": "suspend specifies whether the Job controller should create Pods or not. If a Job is created with suspend set to true, no Pods are created by the Job controller. If a Job is suspended after creation (i.e. the flag goes from false to true), the Job controller will delete all active Pods associated with this Job. Users must design their workload to gracefully handle this. Suspending a Job will reset the StartTime field of the Job, effectively resetting the ActiveDeadlineSeconds timer too. Defaults to false.", + "type": "boolean" + }, + "template": { + "description": "Describes the pod that will be created when executing a job. The only allowed template.spec.restartPolicy values are \"Never\" or \"OnFailure\". More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "$ref": "#/definitions/io.k8s.api.core.v1.PodTemplateSpec" + }, + "ttlSecondsAfterFinished": { + "description": "ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g.
finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes.", + "type": "integer", + "format": "int32" + } + } + }, + "io.k8s.api.batch.v1.JobStatus": { + "description": "JobStatus represents the current state of a Job.", + "type": "object", + "properties": { + "active": { + "description": "The number of pending and running pods.", + "type": "integer", + "format": "int32" + }, + "completedIndexes": { + "description": "completedIndexes holds the completed indexes when .spec.completionMode = \"Indexed\" in a text format. The indexes are represented as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the completed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\".", + "type": "string" + }, + "completionTime": { + "description": "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC. The completion time is only set when the job finishes successfully.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "conditions": { + "description": "The latest available observations of an object's current state. When a Job fails, one of the conditions will have type \"Failed\" and status true. When a Job is suspended, one of the conditions will have type \"Suspended\" and status true; when the Job is resumed, the status of this condition will become false. When a Job is completed, one of the conditions will have type \"Complete\" and status true. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.batch.v1.JobCondition" + }, + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "failed": { + "description": "The number of pods which reached phase Failed.", + "type": "integer", + "format": "int32" + }, + "failedIndexes": { + "description": "FailedIndexes holds the failed indexes when backoffLimitPerIndex=true. The indexes are represented in a text format analogous to the `completedIndexes` field, i.e. they are kept as decimal integers separated by commas. The numbers are listed in increasing order. Three or more consecutive numbers are compressed and represented by the first and last element of the series, separated by a hyphen. For example, if the failed indexes are 1, 3, 4, 5 and 7, they are represented as \"1,3-5,7\". This field is beta-level. It can be used when the `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).", + "type": "string" + }, + "ready": { + "description": "The number of pods which have a Ready condition.", + "type": "integer", + "format": "int32" + }, + "startTime": { + "description": "Represents time when the job controller started processing a job. When a Job is created in the suspended state, this field is not set until the first time it is resumed. This field is reset every time a Job is resumed from suspension.
It is represented in RFC3339 form and is in UTC.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "succeeded": { + "description": "The number of pods which reached phase Succeeded.", + "type": "integer", + "format": "int32" + }, + "terminating": { + "description": "The number of pods which are terminating (in phase Pending or Running and have a deletionTimestamp).\n\nThis field is beta-level. The job controller populates the field when the feature gate JobPodReplacementPolicy is enabled (enabled by default).", + "type": "integer", + "format": "int32" + }, + "uncountedTerminatedPods": { + "description": "uncountedTerminatedPods holds the UIDs of Pods that have terminated but the job controller hasn't yet accounted for in the status counters.\n\nThe job controller creates pods with a finalizer. When a pod terminates (succeeded or failed), the controller does three steps to account for it in the job status:\n\n1. Add the pod UID to the arrays in this field. 2. Remove the pod finalizer. 3. Remove the pod UID from the arrays while increasing the corresponding\n counter.\n\nOld jobs might not be tracked using this field, in which case the field remains null.", + "$ref": "#/definitions/io.k8s.api.batch.v1.UncountedTerminatedPods" + } + } + }, + "io.k8s.api.batch.v1.JobTemplateSpec": { + "description": "JobTemplateSpec describes the data a Job should have when created from a template", + "type": "object", + "properties": { + "metadata": { + "description": "Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.batch.v1.JobSpec" + } + } + }, + "io.k8s.api.batch.v1.PodFailurePolicy": { + "description": "PodFailurePolicy describes how failed pods influence the backoffLimit.", + "type": "object", + "required": [ + "rules" + ], + "properties": { + "rules": { + "description": "A list of pod failure policy rules. The rules are evaluated in order. Once a rule matches a Pod failure, the remaining rules are ignored. When no rule matches the Pod failure, the default handling applies - the counter of pod failures is incremented and it is checked against the backoffLimit. At most 20 elements are allowed.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.batch.v1.PodFailurePolicyRule" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "io.k8s.api.batch.v1.PodFailurePolicyOnExitCodesRequirement": { + "description": "PodFailurePolicyOnExitCodesRequirement describes the requirement for handling a failed pod based on its container exit codes. In particular, it looks up the .state.terminated.exitCode for each app container and init container status, represented by the .status.containerStatuses and .status.initContainerStatuses fields in the Pod status, respectively. Containers completed with success (exit code 0) are excluded from the requirement check.", + "type": "object", + "required": [ + "operator", + "values" + ], + "properties": { + "containerName": { + "description": "Restricts the check for exit codes to the container with the specified name. When null, the rule applies to all containers. When specified, it should match one of the container or initContainer names in the pod template.", + "type": "string" + }, + "operator": { + "description": "Represents the relationship between the container exit code(s) and the specified values. Containers completed with success (exit code 0) are excluded from the requirement check. Possible values are:\n\n- In: the requirement is satisfied if at least one container exit code\n (might be multiple if there are multiple containers not restricted\n by the 'containerName' field) is in the set of specified values.\n- NotIn: the requirement is satisfied if at least one container exit code\n (might be multiple if there are multiple containers not restricted\n by the 'containerName' field) is not in the set of specified values.\nAdditional values are considered to be added in the future. Clients should react to an unknown operator by assuming the requirement is not satisfied.", + "type": "string" + }, + "values": { + "description": "Specifies the set of values. Each returned container exit code (might be multiple in case of multiple containers) is checked against this set of values with respect to the operator. The list of values must be ordered and must not contain duplicates. Value '0' cannot be used for the In operator. At least one element is required. At most 255 elements are allowed.", + "type": "array", + "items": { + "type": "integer", + "format": "int32" + }, + "x-kubernetes-list-type": "set" + } + } + }, + "io.k8s.api.batch.v1.PodFailurePolicyOnPodConditionsPattern": { + "description": "PodFailurePolicyOnPodConditionsPattern describes a pattern for matching an actual pod condition type.", + "type": "object", + "required": [ + "type", + "status" + ], + "properties": { + "status": { + "description": "Specifies the required Pod condition status. To match a pod condition it is required that the specified status equals the pod condition status. Defaults to True.", + "type": "string" + }, + "type": { + "description": "Specifies the required Pod condition type. To match a pod condition it is required that specified type equals the pod condition type.", + "type": "string" + } + } + }, + "io.k8s.api.batch.v1.PodFailurePolicyRule": { + "description": "PodFailurePolicyRule describes how a pod failure is handled when the requirements are met. One of onExitCodes and onPodConditions, but not both, can be used in each rule.", + "type": "object", + "required": [ + "action" + ], + "properties": { + "action": { + "description": "Specifies the action taken on a pod failure when the requirements are satisfied. Possible values are:\n\n- FailJob: indicates that the pod's job is marked as Failed and all\n running pods are terminated.\n- FailIndex: indicates that the pod's index is marked as Failed and will\n not be restarted.\n This value is beta-level. It can be used when the\n `JobBackoffLimitPerIndex` feature gate is enabled (enabled by default).\n- Ignore: indicates that the counter towards the .backoffLimit is not\n incremented and a replacement pod is created.\n- Count: indicates that the pod is handled in the default way - the\n counter towards the .backoffLimit is incremented.\nAdditional values are considered to be added in the future.
Clients should react to an unknown action by skipping the rule.", + "type": "string" + }, + "onExitCodes": { + "description": "Represents the requirement on the container exit codes.", + "$ref": "#/definitions/io.k8s.api.batch.v1.PodFailurePolicyOnExitCodesRequirement" + }, + "onPodConditions": { + "description": "Represents the requirement on the pod conditions. The requirement is represented as a list of pod condition patterns. The requirement is satisfied if at least one pattern matches an actual pod condition. At most 20 elements are allowed.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.batch.v1.PodFailurePolicyOnPodConditionsPattern" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "io.k8s.api.batch.v1.UncountedTerminatedPods": { + "description": "UncountedTerminatedPods holds UIDs of Pods that have terminated but haven't been accounted for in Job status counters.", + "type": "object", + "properties": { + "failed": { + "description": "failed holds UIDs of failed Pods.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "set" + }, + "succeeded": { + "description": "succeeded holds UIDs of succeeded Pods.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "set" + } + } + }, + "io.k8s.api.certificates.v1.CertificateSigningRequest": { + "description": "CertificateSigningRequest objects provide a mechanism to obtain x509 certificates by submitting a certificate signing request, and having it asynchronously approved and issued.\n\nKubelets use this API to obtain:\n 1. client certificates to authenticate to kube-apiserver (with the \"kubernetes.io/kube-apiserver-client-kubelet\" signerName).\n 2. serving certificates for TLS endpoints kube-apiserver can connect to securely (with the \"kubernetes.io/kubelet-serving\" signerName).\n\nThis API can be used to request client certificates to authenticate to kube-apiserver (with the \"kubernetes.io/kube-apiserver-client\" signerName), or to obtain certificates from custom non-Kubernetes signers.", + "type": "object", + "required": [ + "spec" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "spec contains the certificate request, and is immutable after creation. Only the request, signerName, expirationSeconds, and usages fields can be set on creation.
Other fields are derived by Kubernetes and cannot be modified by users.", + "$ref": "#/definitions/io.k8s.api.certificates.v1.CertificateSigningRequestSpec" + }, + "status": { + "description": "status contains information about whether the request is approved or denied, and the certificate issued by the signer, or the failure condition indicating signer failure.", + "$ref": "#/definitions/io.k8s.api.certificates.v1.CertificateSigningRequestStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "certificates.k8s.io", + "kind": "CertificateSigningRequest", + "version": "v1" + } + ] + }, + "io.k8s.api.certificates.v1.CertificateSigningRequestCondition": { + "description": "CertificateSigningRequestCondition describes a condition of a CertificateSigningRequest object", + "type": "object", + "required": [ + "type", + "status" + ], + "properties": { + "lastTransitionTime": { + "description": "lastTransitionTime is the time the condition last transitioned from one status to another. If unset, when a new condition type is added or an existing condition's status is changed, the server defaults this to the current time.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "lastUpdateTime": { + "description": "lastUpdateTime is the time of the last update to this condition", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "message": { + "description": "message contains a human readable message with details about the request state", + "type": "string" + }, + "reason": { + "description": "reason indicates a brief reason for the request state", + "type": "string" + }, + "status": { + "description": "status of the condition, one of True, False, Unknown. Approved, Denied, and Failed conditions may not be \"False\" or \"Unknown\".", + "type": "string" + }, + "type": { + "description": "type of the condition. Known conditions are \"Approved\", \"Denied\", and \"Failed\".\n\nAn \"Approved\" condition is added via the /approval subresource, indicating the request was approved and should be issued by the signer.\n\nA \"Denied\" condition is added via the /approval subresource, indicating the request was denied and should not be issued by the signer.\n\nA \"Failed\" condition is added via the /status subresource, indicating the signer failed to issue the certificate.\n\nApproved and Denied conditions are mutually exclusive. Approved, Denied, and Failed conditions cannot be removed once added.\n\nOnly one condition of a given type is allowed.", + "type": "string" + } + } + }, + "io.k8s.api.certificates.v1.CertificateSigningRequestList": { + "description": "CertificateSigningRequestList is a collection of CertificateSigningRequest objects", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is a collection of CertificateSigningRequest objects", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.certificates.v1.CertificateSigningRequest" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. 
Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "certificates.k8s.io", + "kind": "CertificateSigningRequestList", + "version": "v1" + } + ] + }, + "io.k8s.api.certificates.v1.CertificateSigningRequestSpec": { + "description": "CertificateSigningRequestSpec contains the certificate request.", + "type": "object", + "required": [ + "request", + "signerName" + ], + "properties": { + "expirationSeconds": { + "description": "expirationSeconds is the requested duration of validity of the issued certificate. The certificate signer may issue a certificate with a different validity duration so a client must check the delta between the notBefore and notAfter fields in the issued certificate to determine the actual duration.\n\nThe v1.22+ in-tree implementations of the well-known Kubernetes signers will honor this field as long as the requested duration is not greater than the maximum duration they will honor per the --cluster-signing-duration CLI flag to the Kubernetes controller manager.\n\nCertificate signers may not honor this field for various reasons:\n\n 1. Old signer that is unaware of the field (such as the in-tree\n implementations prior to v1.22)\n 2. Signer whose configured maximum is shorter than the requested duration\n 3. Signer whose configured minimum is longer than the requested duration\n\nThe minimum valid value for expirationSeconds is 600, i.e. 10 minutes.", + "type": "integer", + "format": "int32" + }, + "extra": { + "description": "extra contains extra attributes of the user that created the CertificateSigningRequest. Populated by the API server on creation and immutable.", + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "groups": { + "description": "groups contains group membership of the user that created the CertificateSigningRequest. Populated by the API server on creation and immutable.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "atomic" + }, + "request": { + "description": "request contains an x509 certificate signing request encoded in a \"CERTIFICATE REQUEST\" PEM block. When serialized as JSON or YAML, the data is additionally base64-encoded.", + "type": "string", + "format": "byte", + "x-kubernetes-list-type": "atomic" + }, + "signerName": { + "description": "signerName indicates the requested signer, and is a qualified name.\n\nList/watch requests for CertificateSigningRequests can filter on this field using a \"spec.signerName=NAME\" fieldSelector.\n\nWell-known Kubernetes signers are:\n 1. \"kubernetes.io/kube-apiserver-client\": issues client certificates that can be used to authenticate to kube-apiserver.\n Requests for this signer are never auto-approved by kube-controller-manager, and can be issued by the \"csrsigning\" controller in kube-controller-manager.\n 2. \"kubernetes.io/kube-apiserver-client-kubelet\": issues client certificates that kubelets use to authenticate to kube-apiserver.\n Requests for this signer can be auto-approved by the \"csrapproving\" controller in kube-controller-manager, and can be issued by the \"csrsigning\" controller in kube-controller-manager.\n 3.
\"kubernetes.io/kubelet-serving\" issues serving certificates that kubelets use to serve TLS endpoints, which kube-apiserver can connect to securely.\n Requests for this signer are never auto-approved by kube-controller-manager, and can be issued by the \"csrsigning\" controller in kube-controller-manager.\n\nMore details are available at https://k8s.io/docs/reference/access-authn-authz/certificate-signing-requests/#kubernetes-signers\n\nCustom signerNames can also be specified. The signer defines:\n 1. Trust distribution: how trust (CA bundles) are distributed.\n 2. Permitted subjects: and behavior when a disallowed subject is requested.\n 3. Required, permitted, or forbidden x509 extensions in the request (including whether subjectAltNames are allowed, which types, restrictions on allowed values) and behavior when a disallowed extension is requested.\n 4. Required, permitted, or forbidden key usages / extended key usages.\n 5. Expiration/certificate lifetime: whether it is fixed by the signer, configurable by the admin.\n 6. Whether or not requests for CA certificates are allowed.", + "type": "string" + }, + "uid": { + "description": "uid contains the uid of the user that created the CertificateSigningRequest. Populated by the API server on creation and immutable.", + "type": "string" + }, + "usages": { + "description": "usages specifies a set of key usages requested in the issued certificate.\n\nRequests for TLS client certificates typically request: \"digital signature\", \"key encipherment\", \"client auth\".\n\nRequests for TLS serving certificates typically request: \"key encipherment\", \"digital signature\", \"server auth\".\n\nValid values are:\n \"signing\", \"digital signature\", \"content commitment\",\n \"key encipherment\", \"key agreement\", \"data encipherment\",\n \"cert sign\", \"crl sign\", \"encipher only\", \"decipher only\", \"any\",\n \"server auth\", \"client auth\",\n \"code signing\", \"email protection\", \"s/mime\",\n \"ipsec end system\", \"ipsec tunnel\", \"ipsec user\",\n \"timestamping\", \"ocsp signing\", \"microsoft sgc\", \"netscape sgc\"", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "atomic" + }, + "username": { + "description": "username contains the name of the user that created the CertificateSigningRequest. Populated by the API server on creation and immutable.", + "type": "string" + } + } + }, + "io.k8s.api.certificates.v1.CertificateSigningRequestStatus": { + "description": "CertificateSigningRequestStatus contains conditions used to indicate approved/denied/failed status of the request, and the issued certificate.", + "type": "object", + "properties": { + "certificate": { + "description": "certificate is populated with an issued certificate by the signer after an Approved condition is present. This field is set via the /status subresource. Once populated, this field is immutable.\n\nIf the certificate signing request is denied, a condition of type \"Denied\" is added and this field remains empty. If the signer cannot issue the certificate, a condition of type \"Failed\" is added and this field remains empty.\n\nValidation requirements:\n 1. certificate must contain one or more PEM blocks.\n 2. All PEM blocks must have the \"CERTIFICATE\" label, contain no headers, and the encoded data\n must be a BER-encoded ASN.1 Certificate structure as described in section 4 of RFC5280.\n 3. 
Non-PEM content may appear before or after the \"CERTIFICATE\" PEM blocks and is unvalidated,\n to allow for explanatory text as described in section 5.2 of RFC7468.\n\nIf more than one PEM block is present, and the definition of the requested spec.signerName does not indicate otherwise, the first block is the issued certificate, and subsequent blocks should be treated as intermediate certificates and presented in TLS handshakes.\n\nThe certificate is encoded in PEM format.\n\nWhen serialized as JSON or YAML, the data is additionally base64-encoded, so it consists of:\n\n base64(\n -----BEGIN CERTIFICATE-----\n ...\n -----END CERTIFICATE-----\n )", + "type": "string", + "format": "byte", + "x-kubernetes-list-type": "atomic" + }, + "conditions": { + "description": "conditions applied to the request. Known conditions are \"Approved\", \"Denied\", and \"Failed\".", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.certificates.v1.CertificateSigningRequestCondition" + }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map" + } + } + }, + "io.k8s.api.certificates.v1alpha1.ClusterTrustBundle": { + "description": "ClusterTrustBundle is a cluster-scoped container for X.509 trust anchors (root certificates).\n\nClusterTrustBundle objects are considered to be readable by any authenticated user in the cluster, because they can be mounted by pods using the `clusterTrustBundle` projection. All service accounts have read access to ClusterTrustBundles by default. Users who only have namespace-level access to a cluster can read ClusterTrustBundles by impersonating a serviceaccount that they have access to.\n\nIt can be optionally associated with a particular signer, in which case it contains one valid set of trust anchors for that signer. Signers may have multiple associated ClusterTrustBundles; each is an independent set of trust anchors for that signer. Admission control is used to enforce that only users with permissions on the signer can create or modify the corresponding bundle.", + "type": "object", + "required": [ + "spec" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "metadata contains the object metadata.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "spec contains the signer (if any) and trust anchors.", + "$ref": "#/definitions/io.k8s.api.certificates.v1alpha1.ClusterTrustBundleSpec" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "certificates.k8s.io", + "kind": "ClusterTrustBundle", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.certificates.v1alpha1.ClusterTrustBundleList": { + "description": "ClusterTrustBundleList is a collection of ClusterTrustBundle objects", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is a collection of ClusterTrustBundle objects", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.certificates.v1alpha1.ClusterTrustBundle" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "metadata contains the list metadata.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "certificates.k8s.io", + "kind": "ClusterTrustBundleList", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.certificates.v1alpha1.ClusterTrustBundleSpec": { + "description": "ClusterTrustBundleSpec contains the signer and trust anchors.", + "type": "object", + "required": [ + "trustBundle" + ], + "properties": { + "signerName": { + "description": "signerName indicates the associated signer, if any.\n\nIn order to create or update a ClusterTrustBundle that sets signerName, you must have the following cluster-scoped permission: group=certificates.k8s.io resource=signers resourceName=\u003cthe signer name\u003e verb=attest.\n\nIf signerName is not empty, then the ClusterTrustBundle object must be named with the signer name as a prefix (translating slashes to colons). For example, for the signer name `example.com/foo`, valid ClusterTrustBundle object names include `example.com:foo:abc` and `example.com:foo:v1`.\n\nIf signerName is empty, then the ClusterTrustBundle object's name must not have such a prefix.\n\nList/watch requests for ClusterTrustBundles can filter on this field using a `spec.signerName=NAME` field selector.", + "type": "string" + }, + "trustBundle": { + "description": "trustBundle contains the individual X.509 trust anchors for this bundle, as a PEM bundle of PEM-wrapped, DER-formatted X.509 certificates.\n\nThe data must consist only of PEM certificate blocks that parse as valid X.509 certificates. Each certificate must include a basic constraints extension with the CA bit set. 
The API server will reject objects that contain duplicate certificates, or that use PEM block headers.\n\nUsers of ClusterTrustBundles, including Kubelet, are free to reorder and deduplicate certificate blocks in this file according to their own logic, as well as to drop PEM block headers and inter-block data.", + "type": "string" + } + } + }, + "io.k8s.api.coordination.v1.Lease": { + "description": "Lease defines a lease concept.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "spec contains the specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.coordination.v1.LeaseSpec" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "coordination.k8s.io", + "kind": "Lease", + "version": "v1" + } + ] + }, + "io.k8s.api.coordination.v1.LeaseList": { + "description": "LeaseList is a list of Lease objects.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is a list of schema objects.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.coordination.v1.Lease" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "coordination.k8s.io", + "kind": "LeaseList", + "version": "v1" + } + ] + }, + "io.k8s.api.coordination.v1.LeaseSpec": { + "description": "LeaseSpec is a specification of a Lease.", + "type": "object", + "properties": { + "acquireTime": { + "description": "acquireTime is a time when the current lease was acquired.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime" + }, + "holderIdentity": { + "description": "holderIdentity contains the identity of the holder of a current lease.", + "type": "string" + }, + "leaseDurationSeconds": { + "description": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measured against time of last observed renewTime.", + "type": "integer", + "format": "int32" + }, + "leaseTransitions": { + "description": "leaseTransitions is the number of transitions of a lease between holders.", + "type": "integer", + "format": "int32" + }, + "renewTime": { + "description": "renewTime is a time when the current holder of a lease has last updated the lease.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime" + } + } + }, + "io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource": { + "description": "Represents a Persistent Disk resource in AWS.\n\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling.", + "type": "object", + "required": [ + "volumeID" + ], + "properties": { + "fsType": { + "description": "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "type": "string" + }, + "partition": { + "description": "partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).", + "type": "integer", + "format": "int32" + }, + "readOnly": { + "description": "readOnly value true will force the readOnly setting in VolumeMounts. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "type": "boolean" + }, + "volumeID": { + "description": "volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.Affinity": { + "description": "Affinity is a group of affinity scheduling rules.", + "type": "object", + "properties": { + "nodeAffinity": { + "description": "Describes node affinity scheduling rules for the pod.", + "$ref": "#/definitions/io.k8s.api.core.v1.NodeAffinity" + }, + "podAffinity": { + "description": "Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. 
as some other pod(s)).", + "$ref": "#/definitions/io.k8s.api.core.v1.PodAffinity" + }, + "podAntiAffinity": { + "description": "Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).", + "$ref": "#/definitions/io.k8s.api.core.v1.PodAntiAffinity" + } + } + }, + "io.k8s.api.core.v1.AttachedVolume": { + "description": "AttachedVolume describes a volume attached to a node", + "type": "object", + "required": [ + "name", + "devicePath" + ], + "properties": { + "devicePath": { + "description": "DevicePath represents the device path where the volume should be available", + "type": "string" + }, + "name": { + "description": "Name of the attached volume", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.AzureDiskVolumeSource": { + "description": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + "type": "object", + "required": [ + "diskName", + "diskURI" + ], + "properties": { + "cachingMode": { + "description": "cachingMode is the Host Caching mode: None, Read Only, Read Write.", + "type": "string" + }, + "diskName": { + "description": "diskName is the Name of the data disk in the blob storage", + "type": "string" + }, + "diskURI": { + "description": "diskURI is the URI of data disk in the blob storage", + "type": "string" + }, + "fsType": { + "description": "fsType is Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "type": "string" + }, + "kind": { + "description": "kind expected values are Shared: multiple blob disks per storage account; Dedicated: single blob disk per storage account; Managed: azure managed data disk (only in managed availability set). Defaults to shared.", + "type": "string" + }, + "readOnly": { + "description": "readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "type": "boolean" + } + } + }, + "io.k8s.api.core.v1.AzureFilePersistentVolumeSource": { + "description": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "type": "object", + "required": [ + "secretName", + "shareName" + ], + "properties": { + "readOnly": { + "description": "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "type": "boolean" + }, + "secretName": { + "description": "secretName is the name of secret that contains Azure Storage Account Name and Key", + "type": "string" + }, + "secretNamespace": { + "description": "secretNamespace is the namespace of the secret that contains Azure Storage Account Name and Key; default is the same as the Pod", + "type": "string" + }, + "shareName": { + "description": "shareName is the azure Share Name", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.AzureFileVolumeSource": { + "description": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "type": "object", + "required": [ + "secretName", + "shareName" + ], + "properties": { + "readOnly": { + "description": "readOnly defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "type": "boolean" + }, + "secretName": { + "description": "secretName is the name of secret that contains Azure Storage Account Name and Key", + "type": "string" + }, + "shareName": { + "description": "shareName is the azure share Name", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.Binding": { + "description": "Binding ties one object to another; for example, a pod is bound to a node by a scheduler. Deprecated in 1.7, please use the bindings subresource of pods instead.", + "type": "object", + "required": [ + "target" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "target": { + "description": "The target object that you want to bind to the standard object.", + "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "Binding", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.CSIPersistentVolumeSource": { + "description": "Represents storage that is managed by an external CSI volume driver (Beta feature)", + "type": "object", + "required": [ + "driver", + "volumeHandle" + ], + "properties": { + "controllerExpandSecretRef": { + "description": "controllerExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerExpandVolume call. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretReference" + }, + "controllerPublishSecretRef": { + "description": "controllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretReference" + }, + "driver": { + "description": "driver is the name of the driver to use for this volume. Required.", + "type": "string" + }, + "fsType": { + "description": "fsType to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\".", + "type": "string" + }, + "nodeExpandSecretRef": { + "description": "nodeExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeExpandVolume call. This field is optional, may be omitted if no secret is required. 
If the secret object contains more than one secret, all secrets are passed.", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretReference" + }, + "nodePublishSecretRef": { + "description": "nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretReference" + }, + "nodeStageSecretRef": { + "description": "nodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretReference" + }, + "readOnly": { + "description": "readOnly value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write).", + "type": "boolean" + }, + "volumeAttributes": { + "description": "volumeAttributes of the volume to publish.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "volumeHandle": { + "description": "volumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.CSIVolumeSource": { + "description": "Represents a source location of a volume to mount, managed by an external CSI driver", + "type": "object", + "required": [ + "driver" + ], + "properties": { + "driver": { + "description": "driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster.", + "type": "string" + }, + "fsType": { + "description": "fsType to mount. Ex. \"ext4\", \"xfs\", \"ntfs\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.", + "type": "string" + }, + "nodePublishSecretRef": { + "description": "nodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed.", + "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" + }, + "readOnly": { + "description": "readOnly specifies a read-only configuration for the volume. Defaults to false (read/write).", + "type": "boolean" + }, + "volumeAttributes": { + "description": "volumeAttributes stores driver-specific properties that are passed to the CSI driver. 
Consult your driver's documentation for supported values.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "io.k8s.api.core.v1.Capabilities": { + "description": "Adds and removes POSIX capabilities from running containers.", + "type": "object", + "properties": { + "add": { + "description": "Added capabilities", + "type": "array", + "items": { + "type": "string" + } + }, + "drop": { + "description": "Removed capabilities", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "io.k8s.api.core.v1.CephFSPersistentVolumeSource": { + "description": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod. Cephfs volumes do not support ownership management or SELinux relabeling.", + "type": "object", + "required": [ + "monitors" + ], + "properties": { + "monitors": { + "description": "monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "type": "array", + "items": { + "type": "string" + } + }, + "path": { + "description": "path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /", + "type": "string" + }, + "readOnly": { + "description": "readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "type": "boolean" + }, + "secretFile": { + "description": "secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "type": "string" + }, + "secretRef": { + "description": "secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretReference" + }, + "user": { + "description": "user is Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.CephFSVolumeSource": { + "description": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod. Cephfs volumes do not support ownership management or SELinux relabeling.", + "type": "object", + "required": [ + "monitors" + ], + "properties": { + "monitors": { + "description": "monitors is Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "type": "array", + "items": { + "type": "string" + } + }, + "path": { + "description": "path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /", + "type": "string" + }, + "readOnly": { + "description": "readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "type": "boolean" + }, + "secretFile": { + "description": "secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "type": "string" + }, + "secretRef": { + "description": "secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" + }, + "user": { + "description": "user is optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.CinderPersistentVolumeSource": { + "description": "Represents a cinder volume resource in OpenStack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", + "type": "object", + "required": [ + "volumeID" + ], + "properties": { + "fsType": { + "description": "fsType Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "type": "string" + }, + "readOnly": { + "description": "readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "type": "boolean" + }, + "secretRef": { + "description": "secretRef is Optional: points to a secret object containing parameters used to connect to OpenStack.", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretReference" + }, + "volumeID": { + "description": "volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.CinderVolumeSource": { + "description": "Represents a cinder volume resource in OpenStack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", + "type": "object", + "required": [ + "volumeID" + ], + "properties": { + "fsType": { + "description": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "type": "string" + }, + "readOnly": { + "description": "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "type": "boolean" + }, + "secretRef": { + "description": "secretRef is optional: points to a secret object containing parameters used to connect to OpenStack.", + "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" + }, + "volumeID": { + "description": "volumeID used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.ClaimSource": { + "description": "ClaimSource describes a reference to a ResourceClaim.\n\nExactly one of these fields should be set. 
Consumers of this type must treat an empty object as if it has an unknown value.", + "type": "object", + "properties": { + "resourceClaimName": { + "description": "ResourceClaimName is the name of a ResourceClaim object in the same namespace as this pod.", + "type": "string" + }, + "resourceClaimTemplateName": { + "description": "ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod.\n\nThe template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses.\n\nThis field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.ClientIPConfig": { + "description": "ClientIPConfig represents the configurations of Client IP based session affinity.", + "type": "object", + "properties": { + "timeoutSeconds": { + "description": "timeoutSeconds specifies the seconds of ClientIP type session sticky time. The value must be \u003e0 \u0026\u0026 \u003c=86400(for 1 day) if ServiceAffinity == \"ClientIP\". Default value is 10800(for 3 hours).", + "type": "integer", + "format": "int32" + } + } + }, + "io.k8s.api.core.v1.ClusterTrustBundleProjection": { + "description": "ClusterTrustBundleProjection describes how to select a set of ClusterTrustBundle objects and project their contents into the pod filesystem.", + "type": "object", + "required": [ + "path" + ], + "properties": { + "labelSelector": { + "description": "Select all ClusterTrustBundles that match this label selector. Only has effect if signerName is set. Mutually-exclusive with name. If unset, interpreted as \"match nothing\". If set but empty, interpreted as \"match everything\".", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + }, + "name": { + "description": "Select a single ClusterTrustBundle by object name. Mutually-exclusive with signerName and labelSelector.", + "type": "string" + }, + "optional": { + "description": "If true, don't block pod startup if the referenced ClusterTrustBundle(s) aren't available. If using name, then the named ClusterTrustBundle is allowed not to exist. If using signerName, then the combination of signerName and labelSelector is allowed to match zero ClusterTrustBundles.", + "type": "boolean" + }, + "path": { + "description": "Relative path from the volume root to write the bundle.", + "type": "string" + }, + "signerName": { + "description": "Select all ClusterTrustBundles that match this signer name. Mutually-exclusive with name. The contents of all selected ClusterTrustBundles will be unified and deduplicated.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.ComponentCondition": { + "description": "Information about the condition of a component.", + "type": "object", + "required": [ + "type", + "status" + ], + "properties": { + "error": { + "description": "Condition error code for a component. For example, a health check error code.", + "type": "string" + }, + "message": { + "description": "Message about the condition for a component. For example, information about a health check.", + "type": "string" + }, + "status": { + "description": "Status of the condition for a component. 
Valid values for \"Healthy\": \"True\", \"False\", or \"Unknown\".", + "type": "string" + }, + "type": { + "description": "Type of condition for a component. Valid value: \"Healthy\"", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.ComponentStatus": { + "description": "ComponentStatus (and ComponentStatusList) holds the cluster validation info. Deprecated: This API is deprecated in v1.19+", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "conditions": { + "description": "List of component conditions observed", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ComponentCondition" + }, + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "ComponentStatus", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.ComponentStatusList": { + "description": "Status of all the conditions for the component as a list of ComponentStatus objects. Deprecated: This API is deprecated in v1.19+", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of ComponentStatus objects.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ComponentStatus" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "ComponentStatusList", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.ConfigMap": { + "description": "ConfigMap holds configuration data for pods to consume.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "binaryData": { + "description": "BinaryData contains the binary data. Each key must consist of alphanumeric characters, '-', '_' or '.'. BinaryData can contain byte sequences that are not in the UTF-8 range. The keys stored in BinaryData must not overlap with the ones in the Data field; this is enforced during the validation process. Using this field will require 1.10+ apiserver and kubelet.", + "type": "object", + "additionalProperties": { + "type": "string", + "format": "byte" + } + }, + "data": { + "description": "Data contains the configuration data. Each key must consist of alphanumeric characters, '-', '_' or '.'. Values with non-UTF-8 byte sequences must use the BinaryData field. The keys stored in Data must not overlap with the keys in the BinaryData field; this is enforced during the validation process.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "immutable": { + "description": "Immutable, if set to true, ensures that data stored in the ConfigMap cannot be updated (only object metadata can be modified). If not set to true, the field can be modified at any time. Defaulted to nil.", + "type": "boolean" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "ConfigMap", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.ConfigMapEnvSource": { + "description": "ConfigMapEnvSource selects a ConfigMap to populate the environment variables with.\n\nThe contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.", + "type": "object", + "properties": { + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": "string" + }, + "optional": { + "description": "Specify whether the ConfigMap must be defined", + "type": "boolean" + } + } + }, + "io.k8s.api.core.v1.ConfigMapKeySelector": { + "description": "Selects a key from a ConfigMap.", + "type": "object", + "required": [ + "key" + ], + "properties": { + "key": { + "description": "The key to select.", + "type": "string" + }, + "name": { + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": "string" + }, + "optional": { + "description": "Specify whether the ConfigMap or its key must be defined", + "type": "boolean" + } + }, + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.ConfigMapList": { + "description": "ConfigMapList is a resource containing a list of ConfigMap objects.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is the list of ConfigMaps.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ConfigMap" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "ConfigMapList", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.ConfigMapNodeConfigSource": { + "description": "ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. This API is deprecated since 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration", + "type": "object", + "required": [ + "namespace", + "name", + "kubeletConfigKey" + ], + "properties": { + "kubeletConfigKey": { + "description": "KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure. This field is required in all cases.", + "type": "string" + }, + "name": { + "description": "Name is the metadata.name of the referenced ConfigMap. This field is required in all cases.", + "type": "string" + }, + "namespace": { + "description": "Namespace is the metadata.namespace of the referenced ConfigMap. This field is required in all cases.", + "type": "string" + }, + "resourceVersion": { + "description": "ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status.", + "type": "string" + }, + "uid": { + "description": "UID is the metadata.UID of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.ConfigMapProjection": { + "description": "Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. 
Note that this is identical to a configmap volume source without the default mode.", + "type": "object", + "properties": { + "items": { + "description": "items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.KeyToPath" + } + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": "string" + }, + "optional": { + "description": "optional specify whether the ConfigMap or its keys must be defined", + "type": "boolean" + } + } + }, + "io.k8s.api.core.v1.ConfigMapVolumeSource": { + "description": "Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.", + "type": "object", + "properties": { + "defaultMode": { + "description": "defaultMode is optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "type": "integer", + "format": "int32" + }, + "items": { + "description": "items if unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.KeyToPath" + } + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": "string" + }, + "optional": { + "description": "optional specify whether the ConfigMap or its keys must be defined", + "type": "boolean" + } + } + }, + "io.k8s.api.core.v1.Container": { + "description": "A single application container that you want to run within a pod.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "args": { + "description": "Arguments to the entrypoint. The container image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. 
\"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "type": "array", + "items": { + "type": "string" + } + }, + "command": { + "description": "Entrypoint array. Not executed within a shell. The container image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "type": "array", + "items": { + "type": "string" + } + }, + "env": { + "description": "List of environment variables to set in the container. Cannot be updated.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.EnvVar" + }, + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "envFrom": { + "description": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.EnvFromSource" + } + }, + "image": { + "description": "Container image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + "type": "string" + }, + "imagePullPolicy": { + "description": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + "type": "string" + }, + "lifecycle": { + "description": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", + "$ref": "#/definitions/io.k8s.api.core.v1.Lifecycle" + }, + "livenessProbe": { + "description": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "$ref": "#/definitions/io.k8s.api.core.v1.Probe" + }, + "name": { + "description": "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", + "type": "string" + }, + "ports": { + "description": "List of ports to expose from the container. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. 
Modifying this array with strategic merge patch may corrupt the data. For more information See https://github.com/kubernetes/kubernetes/issues/108255. Cannot be updated.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerPort" + }, + "x-kubernetes-list-map-keys": [ + "containerPort", + "protocol" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "containerPort", + "x-kubernetes-patch-strategy": "merge" + }, + "readinessProbe": { + "description": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "$ref": "#/definitions/io.k8s.api.core.v1.Probe" + }, + "resizePolicy": { + "description": "Resources resize policy for the container.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerResizePolicy" + }, + "x-kubernetes-list-type": "atomic" + }, + "resources": { + "description": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements" + }, + "restartPolicy": { + "description": "RestartPolicy defines the restart behavior of individual containers in a pod. This field may only be set for init containers, and the only allowed value is \"Always\". For non-init containers or when this field is not specified, the restart behavior is defined by the Pod's restart policy and the container type. Setting the RestartPolicy as \"Always\" for the init container will have the following effect: this init container will be continually restarted on exit until all regular containers have terminated. Once all regular containers have completed, all init containers with restartPolicy \"Always\" will be shut down. This lifecycle differs from normal init containers and is often referred to as a \"sidecar\" container. Although this init container still starts in the init container sequence, it does not wait for the container to complete before proceeding to the next init container. Instead, the next init container starts immediately after this init container is started, or after any startupProbe has successfully completed.", + "type": "string" + }, + "securityContext": { + "description": "SecurityContext defines the security options the container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", + "$ref": "#/definitions/io.k8s.api.core.v1.SecurityContext" + }, + "startupProbe": { + "description": "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "$ref": "#/definitions/io.k8s.api.core.v1.Probe" + }, + "stdin": { + "description": "Whether this container should allocate a buffer for stdin in the container runtime. 
If this is not set, reads from stdin in the container will always result in EOF. Default is false.", + "type": "boolean" + }, + "stdinOnce": { + "description": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false", + "type": "boolean" + }, + "terminationMessagePath": { + "description": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", + "type": "string" + }, + "terminationMessagePolicy": { + "description": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", + "type": "string" + }, + "tty": { + "description": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", + "type": "boolean" + }, + "volumeDevices": { + "description": "volumeDevices is the list of block devices to be used by the container.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.VolumeDevice" + }, + "x-kubernetes-patch-merge-key": "devicePath", + "x-kubernetes-patch-strategy": "merge" + }, + "volumeMounts": { + "description": "Pod volumes to mount into the container's filesystem. Cannot be updated.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.VolumeMount" + }, + "x-kubernetes-patch-merge-key": "mountPath", + "x-kubernetes-patch-strategy": "merge" + }, + "workingDir": { + "description": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.ContainerImage": { + "description": "Describe a container image", + "type": "object", + "properties": { + "names": { + "description": "Names by which this image is known. e.g. [\"kubernetes.example/hyperkube:v1.0.7\", \"cloud-vendor.registry.example/cloud-vendor/hyperkube:v1.0.7\"]", + "type": "array", + "items": { + "type": "string" + } + }, + "sizeBytes": { + "description": "The size of the image in bytes.", + "type": "integer", + "format": "int64" + } + } + }, + "io.k8s.api.core.v1.ContainerPort": { + "description": "ContainerPort represents a network port in a single container.", + "type": "object", + "required": [ + "containerPort" + ], + "properties": { + "containerPort": { + "description": "Number of port to expose on the pod's IP address. 
This must be a valid port number, 0 \u003c x \u003c 65536.", + "type": "integer", + "format": "int32" + }, + "hostIP": { + "description": "What host IP to bind the external port to.", + "type": "string" + }, + "hostPort": { + "description": "Number of port to expose on the host. If specified, this must be a valid port number, 0 \u003c x \u003c 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.", + "type": "integer", + "format": "int32" + }, + "name": { + "description": "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.", + "type": "string" + }, + "protocol": { + "description": "Protocol for port. Must be UDP, TCP, or SCTP. Defaults to \"TCP\".", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.ContainerResizePolicy": { + "description": "ContainerResizePolicy represents resource resize policy for the container.", + "type": "object", + "required": [ + "resourceName", + "restartPolicy" + ], + "properties": { + "resourceName": { + "description": "Name of the resource to which this resource resize policy applies. Supported values: cpu, memory.", + "type": "string" + }, + "restartPolicy": { + "description": "Restart policy to apply when specified resource is resized. If not specified, it defaults to NotRequired.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.ContainerState": { + "description": "ContainerState holds a possible state of container. Only one of its members may be specified. If none of them is specified, the default one is ContainerStateWaiting.", + "type": "object", + "properties": { + "running": { + "description": "Details about a running container", + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerStateRunning" + }, + "terminated": { + "description": "Details about a terminated container", + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerStateTerminated" + }, + "waiting": { + "description": "Details about a waiting container", + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerStateWaiting" + } + } + }, + "io.k8s.api.core.v1.ContainerStateRunning": { + "description": "ContainerStateRunning is a running state of a container.", + "type": "object", + "properties": { + "startedAt": { + "description": "Time at which the container was last (re-)started", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + } + } + }, + "io.k8s.api.core.v1.ContainerStateTerminated": { + "description": "ContainerStateTerminated is a terminated state of a container.", + "type": "object", + "required": [ + "exitCode" + ], + "properties": { + "containerID": { + "description": "Container's ID in the format '\u003ctype\u003e://\u003ccontainer_id\u003e'", + "type": "string" + }, + "exitCode": { + "description": "Exit status from the last termination of the container", + "type": "integer", + "format": "int32" + }, + "finishedAt": { + "description": "Time at which the container last terminated", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "message": { + "description": "Message regarding the last termination of the container", + "type": "string" + }, + "reason": { + "description": "(brief) reason from the last termination of the container", + "type": "string" + }, + "signal": { + "description": "Signal from the last termination of the container", + "type": "integer", + "format": "int32" + }, + "startedAt": { + "description": "Time at which previous execution of the 
container started", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + } + } + }, + "io.k8s.api.core.v1.ContainerStateWaiting": { + "description": "ContainerStateWaiting is a waiting state of a container.", + "type": "object", + "properties": { + "message": { + "description": "Message regarding why the container is not yet running.", + "type": "string" + }, + "reason": { + "description": "(brief) reason the container is not yet running.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.ContainerStatus": { + "description": "ContainerStatus contains details for the current status of this container.", + "type": "object", + "required": [ + "name", + "ready", + "restartCount", + "image", + "imageID" + ], + "properties": { + "allocatedResources": { + "description": "AllocatedResources represents the compute resources allocated for this container by the node. Kubelet sets this value to Container.Resources.Requests upon successful pod admission and after successfully admitting desired pod resize.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + } + }, + "containerID": { + "description": "ContainerID is the ID of the container in the format '\u003ctype\u003e://\u003ccontainer_id\u003e'. Where type is a container runtime identifier, returned from Version call of CRI API (for example \"containerd\").", + "type": "string" + }, + "image": { + "description": "Image is the name of container image that the container is running. The container image may not match the image used in the PodSpec, as it may have been resolved by the runtime. More info: https://kubernetes.io/docs/concepts/containers/images.", + "type": "string" + }, + "imageID": { + "description": "ImageID is the image ID of the container's image. The image ID may not match the image ID of the image used in the PodSpec, as it may have been resolved by the runtime.", + "type": "string" + }, + "lastState": { + "description": "LastTerminationState holds the last termination state of the container to help debug container crashes and restarts. This field is not populated if the container is still running and RestartCount is 0.", + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerState" + }, + "name": { + "description": "Name is a DNS_LABEL representing the unique name of the container. Each container in a pod must have a unique name across all container types. Cannot be updated.", + "type": "string" + }, + "ready": { + "description": "Ready specifies whether the container is currently passing its readiness check. The value will change as readiness probes keep executing. If no readiness probes are specified, this field defaults to true once the container is fully started (see Started field).\n\nThe value is typically used to determine whether a container is ready to accept traffic.", + "type": "boolean" + }, + "resources": { + "description": "Resources represents the compute resource requests and limits that have been successfully enacted on the running container after it has been started or has been successfully resized.", + "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements" + }, + "restartCount": { + "description": "RestartCount holds the number of times the container has been restarted. Kubelet makes an effort to always increment the value, but there are cases when the state may be lost due to node restarts and then the value may be reset to 0. 
The value is never negative.", + "type": "integer", + "format": "int32" + }, + "started": { + "description": "Started indicates whether the container has finished its postStart lifecycle hook and passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. In both cases, startup probes will run again. Is always true when no startupProbe is defined and container is running and has passed the postStart lifecycle hook. The null value must be treated the same as false.", + "type": "boolean" + }, + "state": { + "description": "State holds details about the container's current condition.", + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerState" + } + } + }, + "io.k8s.api.core.v1.DaemonEndpoint": { + "description": "DaemonEndpoint contains information about a single Daemon endpoint.", + "type": "object", + "required": [ + "Port" + ], + "properties": { + "Port": { + "description": "Port number of the given endpoint.", + "type": "integer", + "format": "int32" + } + } + }, + "io.k8s.api.core.v1.DownwardAPIProjection": { + "description": "Represents downward API info for projecting into a projected volume. Note that this is identical to a downwardAPI volume source without the default mode.", + "type": "object", + "properties": { + "items": { + "description": "Items is a list of DownwardAPIVolume file", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.DownwardAPIVolumeFile" + } + } + } + }, + "io.k8s.api.core.v1.DownwardAPIVolumeFile": { + "description": "DownwardAPIVolumeFile represents information to create the file containing the pod field", + "type": "object", + "required": [ + "path" + ], + "properties": { + "fieldRef": { + "description": "Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.", + "$ref": "#/definitions/io.k8s.api.core.v1.ObjectFieldSelector" + }, + "mode": { + "description": "Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "type": "integer", + "format": "int32" + }, + "path": { + "description": "Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'", + "type": "string" + }, + "resourceFieldRef": { + "description": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.", + "$ref": "#/definitions/io.k8s.api.core.v1.ResourceFieldSelector" + } + } + }, + "io.k8s.api.core.v1.DownwardAPIVolumeSource": { + "description": "DownwardAPIVolumeSource represents a volume containing downward API info. Downward API volumes support ownership management and SELinux relabeling.", + "type": "object", + "properties": { + "defaultMode": { + "description": "Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "type": "integer", + "format": "int32" + }, + "items": { + "description": "Items is a list of downward API volume file", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.DownwardAPIVolumeFile" + } + } + } + }, + "io.k8s.api.core.v1.EmptyDirVolumeSource": { + "description": "Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling.", + "type": "object", + "properties": { + "medium": { + "description": "medium represents what type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + "type": "string" + }, + "sizeLimit": { + "description": "sizeLimit is the total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + } + } + }, + "io.k8s.api.core.v1.EndpointAddress": { + "description": "EndpointAddress is a tuple that describes single IP address.", + "type": "object", + "required": [ + "ip" + ], + "properties": { + "hostname": { + "description": "The Hostname of this endpoint", + "type": "string" + }, + "ip": { + "description": "The IP of this endpoint. May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), or link-local multicast (224.0.0.0/24 or ff02::/16).", + "type": "string" + }, + "nodeName": { + "description": "Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.", + "type": "string" + }, + "targetRef": { + "description": "Reference to object providing the endpoint.", + "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference" + } + }, + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.EndpointPort": { + "description": "EndpointPort is a tuple that describes a single port.", + "type": "object", + "required": [ + "port" + ], + "properties": { + "appProtocol": { + "description": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. 
Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", + "type": "string" + }, + "name": { + "description": "The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.", + "type": "string" + }, + "port": { + "description": "The port number of the endpoint.", + "type": "integer", + "format": "int32" + }, + "protocol": { + "description": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", + "type": "string" + } + }, + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.EndpointSubset": { + "description": "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\n\n\t{\n\t Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t}\n\nThe resulting set of endpoints can be viewed as:\n\n\ta: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n\tb: [ 10.10.1.1:309, 10.10.2.2:309 ]", + "type": "object", + "properties": { + "addresses": { + "description": "IP addresses which offer the related ports that are marked as ready. These endpoints should be considered safe for load balancers and clients to utilize.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.EndpointAddress" + } + }, + "notReadyAddresses": { + "description": "IP addresses which offer the related ports but are not currently marked as ready because they have not yet finished starting, have recently failed a readiness check, or have recently failed a liveness check.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.EndpointAddress" + } + }, + "ports": { + "description": "Port numbers available on the related IP addresses.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.EndpointPort" + } + } + } + }, + "io.k8s.api.core.v1.Endpoints": { + "description": "Endpoints is a collection of endpoints that implement the actual service. Example:\n\n\t Name: \"mysvc\",\n\t Subsets: [\n\t {\n\t Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n\t },\n\t {\n\t Addresses: [{\"ip\": \"10.10.3.3\"}],\n\t Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n\t },\n\t]", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "subsets": { + "description": "The set of all endpoints is the union of all subsets. Addresses are placed into subsets according to the IPs they share. A single address with multiple ports, some of which are ready and some of which are not (because they come from different containers) will result in the address being displayed in different subsets for the different ports. No address will appear in both Addresses and NotReadyAddresses in the same subset. Sets of addresses and ports that comprise a service.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.EndpointSubset" + } + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "Endpoints", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.EndpointsList": { + "description": "EndpointsList is a list of endpoints.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of endpoints.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Endpoints" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "EndpointsList", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.EnvFromSource": { + "description": "EnvFromSource represents the source of a set of ConfigMaps", + "type": "object", + "properties": { + "configMapRef": { + "description": "The ConfigMap to select from", + "$ref": "#/definitions/io.k8s.api.core.v1.ConfigMapEnvSource" + }, + "prefix": { + "description": "An optional identifier to prepend to each key in the ConfigMap. 
Must be a C_IDENTIFIER.", + "type": "string" + }, + "secretRef": { + "description": "The Secret to select from", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretEnvSource" + } + } + }, + "io.k8s.api.core.v1.EnvVar": { + "description": "EnvVar represents an environment variable present in a Container.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "description": "Name of the environment variable. Must be a C_IDENTIFIER.", + "type": "string" + }, + "value": { + "description": "Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".", + "type": "string" + }, + "valueFrom": { + "description": "Source for the environment variable's value. Cannot be used if value is not empty.", + "$ref": "#/definitions/io.k8s.api.core.v1.EnvVarSource" + } + } + }, + "io.k8s.api.core.v1.EnvVarSource": { + "description": "EnvVarSource represents a source for the value of an EnvVar.", + "type": "object", + "properties": { + "configMapKeyRef": { + "description": "Selects a key of a ConfigMap.", + "$ref": "#/definitions/io.k8s.api.core.v1.ConfigMapKeySelector" + }, + "fieldRef": { + "description": "Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['\u003cKEY\u003e']`, `metadata.annotations['\u003cKEY\u003e']`, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.", + "$ref": "#/definitions/io.k8s.api.core.v1.ObjectFieldSelector" + }, + "resourceFieldRef": { + "description": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.", + "$ref": "#/definitions/io.k8s.api.core.v1.ResourceFieldSelector" + }, + "secretKeyRef": { + "description": "Selects a key of a secret in the pod's namespace", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretKeySelector" + } + } + }, + "io.k8s.api.core.v1.EphemeralContainer": { + "description": "An EphemeralContainer is a temporary container that you may add to an existing Pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a Pod is removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation.\n\nTo add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "args": { + "description": "Arguments to the entrypoint. The image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". 
Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "type": "array", + "items": { + "type": "string" + } + }, + "command": { + "description": "Entrypoint array. Not executed within a shell. The image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. Double $$ are reduced to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. \"$$(VAR_NAME)\" will produce the string literal \"$(VAR_NAME)\". Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "type": "array", + "items": { + "type": "string" + } + }, + "env": { + "description": "List of environment variables to set in the container. Cannot be updated.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.EnvVar" + }, + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "envFrom": { + "description": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.EnvFromSource" + } + }, + "image": { + "description": "Container image name. More info: https://kubernetes.io/docs/concepts/containers/images", + "type": "string" + }, + "imagePullPolicy": { + "description": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + "type": "string" + }, + "lifecycle": { + "description": "Lifecycle is not allowed for ephemeral containers.", + "$ref": "#/definitions/io.k8s.api.core.v1.Lifecycle" + }, + "livenessProbe": { + "description": "Probes are not allowed for ephemeral containers.", + "$ref": "#/definitions/io.k8s.api.core.v1.Probe" + }, + "name": { + "description": "Name of the ephemeral container specified as a DNS_LABEL. 
This name must be unique among all containers, init containers and ephemeral containers.", + "type": "string" + }, + "ports": { + "description": "Ports are not allowed for ephemeral containers.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerPort" + }, + "x-kubernetes-list-map-keys": [ + "containerPort", + "protocol" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "containerPort", + "x-kubernetes-patch-strategy": "merge" + }, + "readinessProbe": { + "description": "Probes are not allowed for ephemeral containers.", + "$ref": "#/definitions/io.k8s.api.core.v1.Probe" + }, + "resizePolicy": { + "description": "Resources resize policy for the container.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerResizePolicy" + }, + "x-kubernetes-list-type": "atomic" + }, + "resources": { + "description": "Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.", + "$ref": "#/definitions/io.k8s.api.core.v1.ResourceRequirements" + }, + "restartPolicy": { + "description": "Restart policy for the container to manage the restart behavior of each container within a pod. This may only be set for init containers. You cannot set this field on ephemeral containers.", + "type": "string" + }, + "securityContext": { + "description": "Optional: SecurityContext defines the security options the ephemeral container should be run with. If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext.", + "$ref": "#/definitions/io.k8s.api.core.v1.SecurityContext" + }, + "startupProbe": { + "description": "Probes are not allowed for ephemeral containers.", + "$ref": "#/definitions/io.k8s.api.core.v1.Probe" + }, + "stdin": { + "description": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", + "type": "boolean" + }, + "stdinOnce": { + "description": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false.", + "type": "boolean" + }, + "targetContainerName": { + "description": "If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container uses the namespaces configured in the Pod spec.\n\nThe container runtime must implement support for this feature. If the runtime does not support namespace targeting then the result of setting this field is undefined.", + "type": "string" + }, + "terminationMessagePath": { + "description": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes.
The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", + "type": "string" + }, + "terminationMessagePolicy": { + "description": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", + "type": "string" + }, + "tty": { + "description": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", + "type": "boolean" + }, + "volumeDevices": { + "description": "volumeDevices is the list of block devices to be used by the container.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.VolumeDevice" + }, + "x-kubernetes-patch-merge-key": "devicePath", + "x-kubernetes-patch-strategy": "merge" + }, + "volumeMounts": { + "description": "Pod volumes to mount into the container's filesystem. Subpath mounts are not allowed for ephemeral containers. Cannot be updated.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.VolumeMount" + }, + "x-kubernetes-patch-merge-key": "mountPath", + "x-kubernetes-patch-strategy": "merge" + }, + "workingDir": { + "description": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.EphemeralVolumeSource": { + "description": "Represents an ephemeral volume that is handled by a normal storage driver.", + "type": "object", + "properties": { + "volumeClaimTemplate": { + "description": "Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `\u003cpod name\u003e-\u003cvolume name\u003e` where `\u003cvolume name\u003e` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long).\n\nAn existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster.\n\nThis field is read-only and no changes will be made by Kubernetes to the PVC after it has been created.\n\nRequired, must not be nil.", + "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimTemplate" + } + } + }, + "io.k8s.api.core.v1.Event": { + "description": "Event is a report of an event somewhere in the cluster. Events have a limited retention time and triggers and messages may evolve with time. Event consumers should not rely on the timing of an event with a given Reason reflecting a consistent underlying trigger, or the continued existence of events with that Reason. 
Events should be treated as informative, best-effort, supplemental data.", + "type": "object", + "required": [ + "metadata", + "involvedObject" + ], + "properties": { + "action": { + "description": "What action was taken/failed regarding the Regarding object.", + "type": "string" + }, + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "count": { + "description": "The number of times this event has occurred.", + "type": "integer", + "format": "int32" + }, + "eventTime": { + "description": "Time when this Event was first observed.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime" + }, + "firstTimestamp": { + "description": "The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "involvedObject": { + "description": "The object that this event is about.", + "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "lastTimestamp": { + "description": "The time at which the most recent occurrence of this event was recorded.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "message": { + "description": "A human-readable description of the status of this operation.", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "reason": { + "description": "This should be a short, machine understandable string that gives the reason for the transition into the object's current status.", + "type": "string" + }, + "related": { + "description": "Optional secondary object for more complex actions.", + "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference" + }, + "reportingComponent": { + "description": "Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.", + "type": "string" + }, + "reportingInstance": { + "description": "ID of the controller instance, e.g. `kubelet-xyzf`.", + "type": "string" + }, + "series": { + "description": "Data about the Event series this event represents or nil if it's a singleton Event.", + "$ref": "#/definitions/io.k8s.api.core.v1.EventSeries" + }, + "source": { + "description": "The component reporting this event.
Should be a short machine understandable string.", + "$ref": "#/definitions/io.k8s.api.core.v1.EventSource" + }, + "type": { + "description": "Type of this event (Normal, Warning), new types could be added in the future", + "type": "string" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "Event", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.EventList": { + "description": "EventList is a list of events.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of events", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Event" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "EventList", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.EventSeries": { + "description": "EventSeries contain information on series of events, i.e. thing that was/is happening continuously for some time.", + "type": "object", + "properties": { + "count": { + "description": "Number of occurrences in this series up to the last heartbeat time", + "type": "integer", + "format": "int32" + }, + "lastObservedTime": { + "description": "Time of the last occurrence observed", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime" + } + } + }, + "io.k8s.api.core.v1.EventSource": { + "description": "EventSource contains information for an event.", + "type": "object", + "properties": { + "component": { + "description": "Component from which the event is generated.", + "type": "string" + }, + "host": { + "description": "Node name on which the event is generated.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.ExecAction": { + "description": "ExecAction describes a \"run in container\" action.", + "type": "object", + "properties": { + "command": { + "description": "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "io.k8s.api.core.v1.FCVolumeSource": { + "description": "Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. 
Fibre Channel volumes support ownership management and SELinux relabeling.", + "type": "object", + "properties": { + "fsType": { + "description": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "type": "string" + }, + "lun": { + "description": "lun is Optional: FC target lun number", + "type": "integer", + "format": "int32" + }, + "readOnly": { + "description": "readOnly is Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "type": "boolean" + }, + "targetWWNs": { + "description": "targetWWNs is Optional: FC target worldwide names (WWNs)", + "type": "array", + "items": { + "type": "string" + } + }, + "wwids": { + "description": "wwids is Optional: FC volume world wide identifiers (wwids). Either wwids or a combination of targetWWNs and lun must be set, but not both simultaneously.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "io.k8s.api.core.v1.FlexPersistentVolumeSource": { + "description": "FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin.", + "type": "object", + "required": [ + "driver" + ], + "properties": { + "driver": { + "description": "driver is the name of the driver to use for this volume.", + "type": "string" + }, + "fsType": { + "description": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.", + "type": "string" + }, + "options": { + "description": "options is Optional: this field holds extra command options if any.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "readOnly": { + "description": "readOnly is Optional: defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "type": "boolean" + }, + "secretRef": { + "description": "secretRef is Optional: a reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretReference" + } + } + }, + "io.k8s.api.core.v1.FlexVolumeSource": { + "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + "type": "object", + "required": [ + "driver" + ], + "properties": { + "driver": { + "description": "driver is the name of the driver to use for this volume.", + "type": "string" + }, + "fsType": { + "description": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.", + "type": "string" + }, + "options": { + "description": "options is Optional: this field holds extra command options if any.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "readOnly": { + "description": "readOnly is Optional: defaults to false (read/write).
ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "type": "boolean" + }, + "secretRef": { + "description": "secretRef is Optional: a reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.", + "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" + } + } + }, + "io.k8s.api.core.v1.FlockerVolumeSource": { + "description": "Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling.", + "type": "object", + "properties": { + "datasetName": { + "description": "datasetName is the name of the dataset, stored as metadata -\u003e name on the dataset for Flocker. It should be considered deprecated.", + "type": "string" + }, + "datasetUUID": { + "description": "datasetUUID is the UUID of the dataset. This is the unique identifier of a Flocker dataset.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.GCEPersistentDiskVolumeSource": { + "description": "Represents a Persistent Disk resource in Google Compute Engine.\n\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling.", + "type": "object", + "required": [ + "pdName" + ], + "properties": { + "fsType": { + "description": "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "type": "string" + }, + "partition": { + "description": "partition is the partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "type": "integer", + "format": "int32" + }, + "pdName": { + "description": "pdName is the unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "type": "string" + }, + "readOnly": { + "description": "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "type": "boolean" + } + } + }, + "io.k8s.api.core.v1.GRPCAction": { + "type": "object", + "required": [ + "port" + ], + "properties": { + "port": { + "description": "Port number of the gRPC service.
Number must be in the range 1 to 65535.", + "type": "integer", + "format": "int32" + }, + "service": { + "description": "Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md).\n\nIf this is not specified, the default behavior is defined by gRPC.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.GitRepoVolumeSource": { + "description": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", + "type": "object", + "required": [ + "repository" + ], + "properties": { + "directory": { + "description": "directory is the target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.", + "type": "string" + }, + "repository": { + "description": "repository is the URL", + "type": "string" + }, + "revision": { + "description": "revision is the commit hash for the specified revision.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.GlusterfsPersistentVolumeSource": { + "description": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", + "type": "object", + "required": [ + "endpoints", + "path" + ], + "properties": { + "endpoints": { + "description": "endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "type": "string" + }, + "endpointsNamespace": { + "description": "endpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "type": "string" + }, + "path": { + "description": "path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "type": "string" + }, + "readOnly": { + "description": "readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "type": "boolean" + } + } + }, + "io.k8s.api.core.v1.GlusterfsVolumeSource": { + "description": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", + "type": "object", + "required": [ + "endpoints", + "path" + ], + "properties": { + "endpoints": { + "description": "endpoints is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "type": "string" + }, + "path": { + "description": "path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "type": "string" + }, + "readOnly": { + "description": "readOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "type": "boolean" + } + } + }, + "io.k8s.api.core.v1.HTTPGetAction": { + "description": "HTTPGetAction describes an action based on HTTP Get requests.", + "type": "object", + "required": [ + "port" + ], + "properties": { + "host": { + "description": "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead.", + "type": "string" + }, + "httpHeaders": { + "description": "Custom headers to set in the request. HTTP allows repeated headers.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.HTTPHeader" + } + }, + "path": { + "description": "Path to access on the HTTP server.", + "type": "string" + }, + "port": { + "description": "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString" + }, + "scheme": { + "description": "Scheme to use for connecting to the host. Defaults to HTTP.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.HTTPHeader": { + "description": "HTTPHeader describes a custom header to be used in HTTP probes", + "type": "object", + "required": [ + "name", + "value" + ], + "properties": { + "name": { + "description": "The header field name. This will be canonicalized upon output, so case-variant names will be understood as the same header.", + "type": "string" + }, + "value": { + "description": "The header field value", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.HostAlias": { + "description": "HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.", + "type": "object", + "properties": { + "hostnames": { + "description": "Hostnames for the above IP address.", + "type": "array", + "items": { + "type": "string" + } + }, + "ip": { + "description": "IP address of the host file entry.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.HostIP": { + "description": "HostIP represents a single IP address allocated to the host.", + "type": "object", + "properties": { + "ip": { + "description": "IP is the IP address assigned to the host", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.HostPathVolumeSource": { + "description": "Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling.", + "type": "object", + "required": [ + "path" + ], + "properties": { + "path": { + "description": "path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + "type": "string" + }, + "type": { + "description": "type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.ISCSIPersistentVolumeSource": { + "description": "ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. 
ISCSI volumes support ownership management and SELinux relabeling.", + "type": "object", + "required": [ + "targetPortal", + "iqn", + "lun" + ], + "properties": { + "chapAuthDiscovery": { + "description": "chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication", + "type": "boolean" + }, + "chapAuthSession": { + "description": "chapAuthSession defines whether support iSCSI Session CHAP authentication", + "type": "boolean" + }, + "fsType": { + "description": "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi", + "type": "string" + }, + "initiatorName": { + "description": "initiatorName is the custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface \u003ctarget portal\u003e:\u003cvolume name\u003e will be created for the connection.", + "type": "string" + }, + "iqn": { + "description": "iqn is Target iSCSI Qualified Name.", + "type": "string" + }, + "iscsiInterface": { + "description": "iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).", + "type": "string" + }, + "lun": { + "description": "lun is iSCSI Target Lun number.", + "type": "integer", + "format": "int32" + }, + "portals": { + "description": "portals is the iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "type": "array", + "items": { + "type": "string" + } + }, + "readOnly": { + "description": "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", + "type": "boolean" + }, + "secretRef": { + "description": "secretRef is the CHAP Secret for iSCSI target and initiator authentication", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretReference" + }, + "targetPortal": { + "description": "targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.ISCSIVolumeSource": { + "description": "Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.", + "type": "object", + "required": [ + "targetPortal", + "iqn", + "lun" + ], + "properties": { + "chapAuthDiscovery": { + "description": "chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication", + "type": "boolean" + }, + "chapAuthSession": { + "description": "chapAuthSession defines whether support iSCSI Session CHAP authentication", + "type": "boolean" + }, + "fsType": { + "description": "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi", + "type": "string" + }, + "initiatorName": { + "description": "initiatorName is the custom iSCSI Initiator Name. 
If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface \u003ctarget portal\u003e:\u003cvolume name\u003e will be created for the connection.", + "type": "string" + }, + "iqn": { + "description": "iqn is the target iSCSI Qualified Name.", + "type": "string" + }, + "iscsiInterface": { + "description": "iscsiInterface is the interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).", + "type": "string" + }, + "lun": { + "description": "lun represents iSCSI Target Lun number.", + "type": "integer", + "format": "int32" + }, + "portals": { + "description": "portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "type": "array", + "items": { + "type": "string" + } + }, + "readOnly": { + "description": "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", + "type": "boolean" + }, + "secretRef": { + "description": "secretRef is the CHAP Secret for iSCSI target and initiator authentication", + "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" + }, + "targetPortal": { + "description": "targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.KeyToPath": { + "description": "Maps a string key to a path within a volume.", + "type": "object", + "required": [ + "key", + "path" + ], + "properties": { + "key": { + "description": "key is the key to project.", + "type": "string" + }, + "mode": { + "description": "mode is Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "type": "integer", + "format": "int32" + }, + "path": { + "description": "path is the relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.Lifecycle": { + "description": "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.", + "type": "object", + "properties": { + "postStart": { + "description": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", + "$ref": "#/definitions/io.k8s.api.core.v1.LifecycleHandler" + }, + "preStop": { + "description": "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The Pod's termination grace period countdown begins before the PreStop hook is executed. 
Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period (unless delayed by finalizers). Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", + "$ref": "#/definitions/io.k8s.api.core.v1.LifecycleHandler" + } + } + }, + "io.k8s.api.core.v1.LifecycleHandler": { + "description": "LifecycleHandler defines a specific action that should be taken in a lifecycle hook. One and only one of the fields, except TCPSocket, must be specified.", + "type": "object", + "properties": { + "exec": { + "description": "Exec specifies the action to take.", + "$ref": "#/definitions/io.k8s.api.core.v1.ExecAction" + }, + "httpGet": { + "description": "HTTPGet specifies the http request to perform.", + "$ref": "#/definitions/io.k8s.api.core.v1.HTTPGetAction" + }, + "sleep": { + "description": "Sleep represents the duration that the container should sleep before being terminated.", + "$ref": "#/definitions/io.k8s.api.core.v1.SleepAction" + }, + "tcpSocket": { + "description": "Deprecated. TCPSocket is NOT supported as a LifecycleHandler and is kept for backward compatibility. There is no validation of this field, and lifecycle hooks will fail at runtime when a tcp handler is specified.", + "$ref": "#/definitions/io.k8s.api.core.v1.TCPSocketAction" + } + } + }, + "io.k8s.api.core.v1.LimitRange": { + "description": "LimitRange sets resource usage limits for each kind of resource in a Namespace.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "Spec defines the limits enforced.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.core.v1.LimitRangeSpec" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "LimitRange", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.LimitRangeItem": { + "description": "LimitRangeItem defines a min/max usage limit for any resource that matches on kind.", + "type": "object", + "required": [ + "type" + ], + "properties": { + "default": { + "description": "Default resource requirement limit value by resource name if resource limit is omitted.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + } + }, + "defaultRequest": { + "description": "DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + } + }, + "max": { + "description": "Max usage constraints on this kind by resource name.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + } + }, + "maxLimitRequestRatio": { + "description": "MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + } + }, + "min": { + "description": "Min usage constraints on this kind by resource name.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + } + }, + "type": { + "description": "Type of resource that this limit applies to.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.LimitRangeList": { + "description": "LimitRangeList is a list of LimitRange items.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is a list of LimitRange objects. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.LimitRange" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "LimitRangeList", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.LimitRangeSpec": { + "description": "LimitRangeSpec defines a min/max usage limit for resources that match on kind.", + "type": "object", + "required": [ + "limits" + ], + "properties": { + "limits": { + "description": "Limits is the list of LimitRangeItem objects that are enforced.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.LimitRangeItem" + } + } + } + }, + "io.k8s.api.core.v1.LoadBalancerIngress": { + "description": "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.", + "type": "object", + "properties": { + "hostname": { + "description": "Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers)", + "type": "string" + }, + "ip": { + "description": "IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)", + "type": "string" + }, + "ipMode": { + "description": "IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified. Setting this to \"VIP\" indicates that traffic is delivered to the node with the destination set to the load-balancer's IP and port. Setting this to \"Proxy\" indicates that traffic is delivered to the node or pod with the destination set to the node's IP and node port or the pod's IP and port. Service implementations may use this information to adjust traffic routing.", + "type": "string" + }, + "ports": { + "description": "Ports is a list of records of service ports. If used, every port defined in the service should have an entry in it.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PortStatus" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "io.k8s.api.core.v1.LoadBalancerStatus": { + "description": "LoadBalancerStatus represents the status of a load-balancer.", + "type": "object", + "properties": { + "ingress": { + "description": "Ingress is a list containing ingress points for the load-balancer. Traffic intended for the service should be sent to these ingress points.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.LoadBalancerIngress" + } + } + } + }, + "io.k8s.api.core.v1.LocalObjectReference": { + "description": "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.", + "type": "object", + "properties": { + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": "string" + } + }, + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.LocalVolumeSource": { + "description": "Local represents directly-attached storage with node affinity (Beta feature)", + "type": "object", + "required": [ + "path" + ], + "properties": { + "fsType": { + "description": "fsType is the filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\".
The default value is to auto-select a filesystem if unspecified.", + "type": "string" + }, + "path": { + "description": "path is the full path to the volume on the node. It can be either a directory or block device (disk, partition, ...).", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.ModifyVolumeStatus": { + "description": "ModifyVolumeStatus represents the status object of the ControllerModifyVolume operation", + "type": "object", + "required": [ + "status" + ], + "properties": { + "status": { + "description": "status is the status of the ControllerModifyVolume operation. It can be in any of following states:\n - Pending\n Pending indicates that the PersistentVolumeClaim cannot be modified due to unmet requirements, such as\n the specified VolumeAttributesClass not existing.\n - InProgress\n InProgress indicates that the volume is being modified.\n - Infeasible\n Infeasible indicates that the request has been rejected as invalid by the CSI driver. To\n\t resolve the error, a valid VolumeAttributesClass needs to be specified.\nNote: New statuses can be added in the future. Consumers should check for unknown statuses and fail appropriately.", + "type": "string" + }, + "targetVolumeAttributesClassName": { + "description": "targetVolumeAttributesClassName is the name of the VolumeAttributesClass the PVC is currently being reconciled to", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.NFSVolumeSource": { + "description": "Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling.", + "type": "object", + "required": [ + "server", + "path" + ], + "properties": { + "path": { + "description": "path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "type": "string" + }, + "readOnly": { + "description": "readOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "type": "boolean" + }, + "server": { + "description": "server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.Namespace": { + "description": "Namespace provides a scope for Names. Use of multiple namespaces is optional.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "Spec defines the behavior of the Namespace.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.core.v1.NamespaceSpec" + }, + "status": { + "description": "Status describes the current status of a Namespace. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.core.v1.NamespaceStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "Namespace", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.NamespaceCondition": { + "description": "NamespaceCondition contains details about state of namespace.", + "type": "object", + "required": [ + "type", + "status" + ], + "properties": { + "lastTransitionTime": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "message": { + "type": "string" + }, + "reason": { + "type": "string" + }, + "status": { + "description": "Status of the condition, one of True, False, Unknown.", + "type": "string" + }, + "type": { + "description": "Type of namespace controller condition.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.NamespaceList": { + "description": "NamespaceList is a list of Namespaces.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is the list of Namespace objects in the list. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Namespace" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "NamespaceList", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.NamespaceSpec": { + "description": "NamespaceSpec describes the attributes on a Namespace.", + "type": "object", + "properties": { + "finalizers": { + "description": "Finalizers is an opaque list of values that must be empty to permanently remove object from storage. 
More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "io.k8s.api.core.v1.NamespaceStatus": { + "description": "NamespaceStatus is information about the current status of a Namespace.", + "type": "object", + "properties": { + "conditions": { + "description": "Represents the latest available observations of a namespace's current state.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.NamespaceCondition" + }, + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "phase": { + "description": "Phase is the current lifecycle phase of the namespace. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.Node": { + "description": "Node is a worker node in Kubernetes. Each node will have a unique identifier in the cache (i.e. in etcd).", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "Spec defines the behavior of a node. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.core.v1.NodeSpec" + }, + "status": { + "description": "Most recently observed status of the node. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.core.v1.NodeStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "Node", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.NodeAddress": { + "description": "NodeAddress contains information for the node's address.", + "type": "object", + "required": [ + "type", + "address" + ], + "properties": { + "address": { + "description": "The node address.", + "type": "string" + }, + "type": { + "description": "Node address type, one of Hostname, ExternalIP or InternalIP.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.NodeAffinity": { + "description": "Node affinity is a group of node affinity scheduling rules.", + "type": "object", + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "description": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PreferredSchedulingTerm" + } + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "description": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.", + "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelector" + } + } + }, + "io.k8s.api.core.v1.NodeCondition": { + "description": "NodeCondition contains condition information for a node.", + "type": "object", + "required": [ + "type", + "status" + ], + "properties": { + "lastHeartbeatTime": { + "description": "Last time we got an update on a given condition.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "lastTransitionTime": { + "description": "Last time the condition transitioned from one status to another.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "message": { + "description": "Human readable message indicating details about last transition.", + "type": "string" + }, + "reason": { + "description": "(brief) reason for the condition's last transition.", + "type": "string" + }, + "status": { + "description": "Status of the condition, one of True, False, Unknown.", + "type": "string" + }, + "type": { + "description": "Type of node condition.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.NodeConfigSource": { + "description": "NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. This API is deprecated since 1.22", + "type": "object", + "properties": { + "configMap": { + "description": "ConfigMap is a reference to a Node's ConfigMap", + "$ref": "#/definitions/io.k8s.api.core.v1.ConfigMapNodeConfigSource" + } + } + }, + "io.k8s.api.core.v1.NodeConfigStatus": { + "description": "NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.", + "type": "object", + "properties": { + "active": { + "description": "Active reports the checkpointed config the node is actively using. Active will represent either the current version of the Assigned config, or the current LastKnownGood config, depending on whether attempting to use the Assigned config results in an error.", + "$ref": "#/definitions/io.k8s.api.core.v1.NodeConfigSource" + }, + "assigned": { + "description": "Assigned reports the checkpointed config the node will try to use. When Node.Spec.ConfigSource is updated, the node checkpoints the associated config payload to local disk, along with a record indicating intended config. The node refers to this record to choose its config checkpoint, and reports this record in Assigned. Assigned only updates in the status after the record has been checkpointed to disk.
When the Kubelet is restarted, it tries to make the Assigned config the Active config by loading and validating the checkpointed payload identified by Assigned.", + "$ref": "#/definitions/io.k8s.api.core.v1.NodeConfigSource" + }, + "error": { + "description": "Error describes any problems reconciling the Spec.ConfigSource to the Active config. Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting to load or validate the Assigned config, etc. Errors may occur at different points while syncing config. Earlier errors (e.g. download or checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error by fixing the config assigned in Spec.ConfigSource. You can find additional information for debugging by searching the error message in the Kubelet log. Error is a human-readable description of the error state; machines can check whether or not Error is empty, but should not rely on the stability of the Error text across Kubelet versions.", + "type": "string" + }, + "lastKnownGood": { + "description": "LastKnownGood reports the checkpointed config the node will fall back to when it encounters an error attempting to use the Assigned config. The Assigned config becomes the LastKnownGood config when the node determines that the Assigned config is stable and correct. This is currently implemented as a 10-minute soak period starting when the local record of Assigned config is updated. If the Assigned config is Active at the end of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil, because the local default config is always assumed good. You should not make assumptions about the node's method of determining config stability and correctness, as this may change or become configurable in the future.", + "$ref": "#/definitions/io.k8s.api.core.v1.NodeConfigSource" + } + } + }, + "io.k8s.api.core.v1.NodeDaemonEndpoints": { + "description": "NodeDaemonEndpoints lists ports opened by daemons running on the Node.", + "type": "object", + "properties": { + "kubeletEndpoint": { + "description": "Endpoint on which Kubelet is listening.", + "$ref": "#/definitions/io.k8s.api.core.v1.DaemonEndpoint" + } + } + }, + "io.k8s.api.core.v1.NodeList": { + "description": "NodeList is the whole list of all Nodes which have been registered with master.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of nodes", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Node" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "NodeList", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.NodeSelector": { + "description": "A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.", + "type": "object", + "required": [ + "nodeSelectorTerms" + ], + "properties": { + "nodeSelectorTerms": { + "description": "Required. A list of node selector terms. The terms are ORed.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelectorTerm" + } + } + }, + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.NodeSelectorRequirement": { + "description": "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", + "type": "object", + "required": [ + "key", + "operator" + ], + "properties": { + "key": { + "description": "The label key that the selector applies to.", + "type": "string" + }, + "operator": { + "description": "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.", + "type": "string" + }, + "values": { + "description": "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "io.k8s.api.core.v1.NodeSelectorTerm": { + "description": "A null or empty node selector term matches no objects. Their requirements are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.", + "type": "object", + "properties": { + "matchExpressions": { + "description": "A list of node selector requirements by node's labels.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelectorRequirement" + } + }, + "matchFields": { + "description": "A list of node selector requirements by node's fields.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelectorRequirement" + } + } + }, + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.NodeSpec": { + "description": "NodeSpec describes the attributes that a node is created with.", + "type": "object", + "properties": { + "configSource": { + "description": "Deprecated: Previously used to specify the source of the node's configuration for the DynamicKubeletConfig feature. This feature is removed.", + "$ref": "#/definitions/io.k8s.api.core.v1.NodeConfigSource" + }, + "externalID": { + "description": "Deprecated. Not all kubelets will set this field. Remove field after 1.13.
see: https://issues.k8s.io/61966", + "type": "string" + }, + "podCIDR": { + "description": "PodCIDR represents the pod IP range assigned to the node.", + "type": "string" + }, + "podCIDRs": { + "description": "podCIDRs represents the IP ranges assigned to the node for usage by Pods on that node. If this field is specified, the 0th entry must match the podCIDR field. It may contain at most 1 value for each of IPv4 and IPv6.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-patch-strategy": "merge" + }, + "providerID": { + "description": "ID of the node assigned by the cloud provider in the format: \u003cProviderName\u003e://\u003cProviderSpecificNodeID\u003e", + "type": "string" + }, + "taints": { + "description": "If specified, the node's taints.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Taint" + } + }, + "unschedulable": { + "description": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration", + "type": "boolean" + } + } + }, + "io.k8s.api.core.v1.NodeStatus": { + "description": "NodeStatus is information about the current status of a node.", + "type": "object", + "properties": { + "addresses": { + "description": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See https://pr.k8s.io/79391 for an example. Consumers should assume that addresses can change during the lifetime of a Node. However, there are some exceptions where this may not be possible, such as Pods that inherit a Node's address in its own status or consumers of the downward API (status.hostIP).", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.NodeAddress" + }, + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "allocatable": { + "description": "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + } + }, + "capacity": { + "description": "Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + } + }, + "conditions": { + "description": "Conditions is an array of current observed node conditions. 
More info: https://kubernetes.io/docs/concepts/nodes/node/#condition", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.NodeCondition" + }, + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "config": { + "description": "Status of the config assigned to the node via the dynamic Kubelet config feature.", + "$ref": "#/definitions/io.k8s.api.core.v1.NodeConfigStatus" + }, + "daemonEndpoints": { + "description": "Endpoints of daemons running on the Node.", + "$ref": "#/definitions/io.k8s.api.core.v1.NodeDaemonEndpoints" + }, + "images": { + "description": "List of container images on this node", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerImage" + } + }, + "nodeInfo": { + "description": "Set of ids/uuids to uniquely identify the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#info", + "$ref": "#/definitions/io.k8s.api.core.v1.NodeSystemInfo" + }, + "phase": { + "description": "NodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.", + "type": "string" + }, + "volumesAttached": { + "description": "List of volumes that are attached to the node.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.AttachedVolume" + } + }, + "volumesInUse": { + "description": "List of attachable volumes in use (mounted) by the node.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "io.k8s.api.core.v1.NodeSystemInfo": { + "description": "NodeSystemInfo is a set of ids/uuids to uniquely identify the node.", + "type": "object", + "required": [ + "machineID", + "systemUUID", + "bootID", + "kernelVersion", + "osImage", + "containerRuntimeVersion", + "kubeletVersion", + "kubeProxyVersion", + "operatingSystem", + "architecture" + ], + "properties": { + "architecture": { + "description": "The Architecture reported by the node", + "type": "string" + }, + "bootID": { + "description": "Boot ID reported by the node.", + "type": "string" + }, + "containerRuntimeVersion": { + "description": "ContainerRuntime Version reported by the node through runtime remote API (e.g. containerd://1.4.2).", + "type": "string" + }, + "kernelVersion": { + "description": "Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).", + "type": "string" + }, + "kubeProxyVersion": { + "description": "KubeProxy Version reported by the node.", + "type": "string" + }, + "kubeletVersion": { + "description": "Kubelet Version reported by the node.", + "type": "string" + }, + "machineID": { + "description": "MachineID reported by the node. For unique machine identification in the cluster this field is preferred. Learn more from man(5) machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html", + "type": "string" + }, + "operatingSystem": { + "description": "The Operating System reported by the node", + "type": "string" + }, + "osImage": { + "description": "OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).", + "type": "string" + }, + "systemUUID": { + "description": "SystemUUID reported by the node. For unique machine identification MachineID is preferred. 
This field is specific to Red Hat hosts https://access.redhat.com/documentation/en-us/red_hat_subscription_management/1/html/rhsm/uuid", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.ObjectFieldSelector": { + "description": "ObjectFieldSelector selects an APIVersioned field of an object.", + "type": "object", + "required": [ + "fieldPath" + ], + "properties": { + "apiVersion": { + "description": "Version of the schema the FieldPath is written in terms of, defaults to \"v1\".", + "type": "string" + }, + "fieldPath": { + "description": "Path of the field to select in the specified API version.", + "type": "string" + } + }, + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.ObjectReference": { + "description": "ObjectReference contains enough information to let you inspect or modify the referred object.", + "type": "object", + "properties": { + "apiVersion": { + "description": "API version of the referent.", + "type": "string" + }, + "fieldPath": { + "description": "If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered the event) or if no container name is specified \"spec.containers[2]\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.", + "type": "string" + }, + "kind": { + "description": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": "string" + }, + "namespace": { + "description": "Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/", + "type": "string" + }, + "resourceVersion": { + "description": "Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", + "type": "string" + }, + "uid": { + "description": "UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids", + "type": "string" + } + }, + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.PersistentVolume": { + "description": "PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes", + "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeSpec" + }, + "status": { + "description": "status represents the current information/status for the persistent volume. Populated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes", + "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "PersistentVolume", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.PersistentVolumeClaim": { + "description": "PersistentVolumeClaim is a user's request for and claim to a persistent volume", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimSpec" + }, + "status": { + "description": "status represents the current information/status of a persistent volume claim. Read-only. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "PersistentVolumeClaim", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.PersistentVolumeClaimCondition": { + "description": "PersistentVolumeClaimCondition contains details about the state of a PVC", + "type": "object", + "required": [ + "type", + "status" + ], + "properties": { + "lastProbeTime": { + "description": "lastProbeTime is the time we probed the condition.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "lastTransitionTime": { + "description": "lastTransitionTime is the time the condition transitioned from one status to another.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "message": { + "description": "message is the human-readable message indicating details about last transition.", + "type": "string" + }, + "reason": { + "description": "reason is a unique, short, machine-understandable string that gives the reason for the condition's last transition. If it reports \"ResizeStarted\" that means the underlying persistent volume is being resized.", + "type": "string" + }, + "status": { + "type": "string" + }, + "type": { + "type": "string" + } + } + }, + "io.k8s.api.core.v1.PersistentVolumeClaimList": { + "description": "PersistentVolumeClaimList is a list of PersistentVolumeClaim items.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is a list of persistent volume claims. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeClaim" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "PersistentVolumeClaimList", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.PersistentVolumeClaimSpec": { + "description": "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes", + "type": "object", + "properties": { + "accessModes": { + "description": "accessModes contains the desired access modes the volume should have.
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", + "type": "array", + "items": { + "type": "string" + } + }, + "dataSource": { + "description": "dataSource field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) * An existing PVC (PersistentVolumeClaim) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. If the namespace is specified, then dataSourceRef will not be copied to dataSource.", + "$ref": "#/definitions/io.k8s.api.core.v1.TypedLocalObjectReference" + }, + "dataSourceRef": { + "description": "dataSourceRef specifies the object from which to populate the volume with data, if a non-empty volume is desired. This may be any object from a non-empty API group (non core object) or a PersistentVolumeClaim object. When this field is specified, volume binding will only succeed if the type of the specified object matches some installed volume populator or dynamic provisioner. This field will replace the functionality of the dataSource field and as such if both fields are non-empty, they must have the same value. For backwards compatibility, when namespace isn't specified in dataSourceRef, both fields (dataSource and dataSourceRef) will be set to the same value automatically if one of them is empty and the other is non-empty. When namespace is specified in dataSourceRef, dataSource isn't set to the same value and must be empty. There are three important differences between dataSource and dataSourceRef: * While dataSource only allows two specific types of objects, dataSourceRef\n allows any non-core object, as well as PersistentVolumeClaim objects.\n* While dataSource ignores disallowed values (dropping them), dataSourceRef\n preserves all values, and generates an error if a disallowed value is\n specified.\n* While dataSource only allows local objects, dataSourceRef allows objects\n in any namespaces.\n(Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.", + "$ref": "#/definitions/io.k8s.api.core.v1.TypedObjectReference" + }, + "resources": { + "description": "resources represents the minimum resources the volume should have. If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements that are lower than previous value but must still be higher than capacity recorded in the status field of the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", + "$ref": "#/definitions/io.k8s.api.core.v1.VolumeResourceRequirements" + }, + "selector": { + "description": "selector is a label query over volumes to consider for binding.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + }, + "storageClassName": { + "description": "storageClassName is the name of the StorageClass required by the claim. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", + "type": "string" + }, + "volumeAttributesClassName": { + "description": "volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. If specified, the CSI driver will create or update the volume with the attributes defined in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName; it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass will be applied to the claim but it's not allowed to reset this field to empty string once it is set. If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass will be set by the persistentvolume controller if it exists. If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource exists. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#volumeattributesclass (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.", + "type": "string" + }, + "volumeMode": { + "description": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.", + "type": "string" + }, + "volumeName": { + "description": "volumeName is the binding reference to the PersistentVolume backing this claim.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.PersistentVolumeClaimStatus": { + "description": "PersistentVolumeClaimStatus is the current status of a persistent volume claim.", + "type": "object", + "properties": { + "accessModes": { + "description": "accessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", + "type": "array", + "items": { + "type": "string" + } + }, + "allocatedResourceStatuses": { + "description": "allocatedResourceStatuses stores status of resource being resized for the given PVC. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nClaimResourceStatus can be in any of following states:\n\t- ControllerResizeInProgress:\n\t\tState set when resize controller starts resizing the volume in control-plane.\n\t- ControllerResizeFailed:\n\t\tState set when resize has failed in resize controller with a terminal error.\n\t- NodeResizePending:\n\t\tState set when resize controller has finished resizing the volume but further resizing of\n\t\tvolume is needed on the node.\n\t- NodeResizeInProgress:\n\t\tState set when kubelet starts resizing the volume.\n\t- NodeResizeFailed:\n\t\tState set when resizing has failed in kubelet with a terminal error.
Transient errors don't set\n\t\tNodeResizeFailed.\nFor example: if expanding a PVC for more capacity - this field can be one of the following states:\n\t- pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"ControllerResizeFailed\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizePending\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeInProgress\"\n - pvc.status.allocatedResourceStatus['storage'] = \"NodeResizeFailed\"\nWhen this field is not set, it means that no resize operation is in progress for the given PVC.\n\nA controller that receives PVC update with previously unknown resourceName or ClaimResourceStatus should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", + "type": "object", + "additionalProperties": { + "type": "string" + }, + "x-kubernetes-map-type": "granular" + }, + "allocatedResources": { + "description": "allocatedResources tracks the resources allocated to a PVC including its capacity. Key names follow standard Kubernetes label syntax. Valid values are either:\n\t* Un-prefixed keys:\n\t\t- storage - the capacity of the volume.\n\t* Custom resources must use implementation-defined prefixed names such as \"example.com/my-custom-resource\"\nApart from above values - keys that are unprefixed or have kubernetes.io prefix are considered reserved and hence may not be used.\n\nCapacity reported here may be larger than the actual capacity when a volume expansion operation is requested. For storage quota, the larger value from allocatedResources and PVC.spec.resources is used. If allocatedResources is not set, PVC.spec.resources alone is used for quota calculation. If a volume expansion capacity request is lowered, allocatedResources is only lowered if there are no expansion operations in progress and if the actual volume capacity is equal or lower than the requested capacity.\n\nA controller that receives PVC update with previously unknown resourceName should ignore the update for the purpose it was designed. For example - a controller that only is responsible for resizing capacity of the volume, should ignore PVC updates that change other valid resources associated with PVC.\n\nThis is an alpha field and requires enabling RecoverVolumeExpansionFailure feature.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + } + }, + "capacity": { + "description": "capacity represents the actual resources of the underlying volume.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + } + }, + "conditions": { + "description": "conditions is the current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimCondition" + }, + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "currentVolumeAttributesClassName": { + "description": "currentVolumeAttributesClassName is the current name of the VolumeAttributesClass the PVC is using. 
When unset, there is no VolumeAttributesClass applied to this PersistentVolumeClaim. This is an alpha field and requires enabling the VolumeAttributesClass feature.", + "type": "string" + }, + "modifyVolumeStatus": { + "description": "ModifyVolumeStatus represents the status object of the ControllerModifyVolume operation. When this is unset, there is no ModifyVolume operation being attempted. This is an alpha field and requires enabling the VolumeAttributesClass feature.", + "$ref": "#/definitions/io.k8s.api.core.v1.ModifyVolumeStatus" + }, + "phase": { + "description": "phase represents the current phase of PersistentVolumeClaim.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.PersistentVolumeClaimTemplate": { + "description": "PersistentVolumeClaimTemplate is used to produce PersistentVolumeClaim objects as part of an EphemeralVolumeSource.", + "type": "object", + "required": [ + "spec" + ], + "properties": { + "metadata": { + "description": "May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here.", + "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimSpec" + } + } + }, + "io.k8s.api.core.v1.PersistentVolumeClaimVolumeSource": { + "description": "PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).", + "type": "object", + "required": [ + "claimName" + ], + "properties": { + "claimName": { + "description": "claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + "type": "string" + }, + "readOnly": { + "description": "readOnly will force the ReadOnly setting in VolumeMounts. Default false.", + "type": "boolean" + } + } + }, + "io.k8s.api.core.v1.PersistentVolumeList": { + "description": "PersistentVolumeList is a list of PersistentVolume items.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is a list of persistent volumes. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolume" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "PersistentVolumeList", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.PersistentVolumeSpec": { + "description": "PersistentVolumeSpec is the specification of a persistent volume.", + "type": "object", + "properties": { + "accessModes": { + "description": "accessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes", + "type": "array", + "items": { + "type": "string" + } + }, + "awsElasticBlockStore": { + "description": "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "$ref": "#/definitions/io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource" + }, + "azureDisk": { + "description": "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + "$ref": "#/definitions/io.k8s.api.core.v1.AzureDiskVolumeSource" + }, + "azureFile": { + "description": "azureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "$ref": "#/definitions/io.k8s.api.core.v1.AzureFilePersistentVolumeSource" + }, + "capacity": { + "description": "capacity is the description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + } + }, + "cephfs": { + "description": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + "$ref": "#/definitions/io.k8s.api.core.v1.CephFSPersistentVolumeSource" + }, + "cinder": { + "description": "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "$ref": "#/definitions/io.k8s.api.core.v1.CinderPersistentVolumeSource" + }, + "claimRef": { + "description": "claimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. Expected to be non-nil when bound. claim.VolumeName is the authoritative bind between PV and PVC. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding", + "x-kubernetes-map-type": "granular", + "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference" + }, + "csi": { + "description": "csi represents storage that is handled by an external CSI driver (Beta feature).", + "$ref": "#/definitions/io.k8s.api.core.v1.CSIPersistentVolumeSource" + }, + "fc": { + "description": "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + "$ref": "#/definitions/io.k8s.api.core.v1.FCVolumeSource" + }, + "flexVolume": { + "description": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + "$ref": "#/definitions/io.k8s.api.core.v1.FlexPersistentVolumeSource" + }, + "flocker": { + "description": "flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. 
This depends on the Flocker control service being running", + "$ref": "#/definitions/io.k8s.api.core.v1.FlockerVolumeSource" + }, + "gcePersistentDisk": { + "description": "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "$ref": "#/definitions/io.k8s.api.core.v1.GCEPersistentDiskVolumeSource" + }, + "glusterfs": { + "description": "glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + "$ref": "#/definitions/io.k8s.api.core.v1.GlusterfsPersistentVolumeSource" + }, + "hostPath": { + "description": "hostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + "$ref": "#/definitions/io.k8s.api.core.v1.HostPathVolumeSource" + }, + "iscsi": { + "description": "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.", + "$ref": "#/definitions/io.k8s.api.core.v1.ISCSIPersistentVolumeSource" + }, + "local": { + "description": "local represents directly-attached storage with node affinity", + "$ref": "#/definitions/io.k8s.api.core.v1.LocalVolumeSource" + }, + "mountOptions": { + "description": "mountOptions is the list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options", + "type": "array", + "items": { + "type": "string" + } + }, + "nfs": { + "description": "nfs represents an NFS mount on the host. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "$ref": "#/definitions/io.k8s.api.core.v1.NFSVolumeSource" + }, + "nodeAffinity": { + "description": "nodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.", + "$ref": "#/definitions/io.k8s.api.core.v1.VolumeNodeAffinity" + }, + "persistentVolumeReclaimPolicy": { + "description": "persistentVolumeReclaimPolicy defines what happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming", + "type": "string" + }, + "photonPersistentDisk": { + "description": "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelet's host machine", + "$ref": "#/definitions/io.k8s.api.core.v1.PhotonPersistentDiskVolumeSource" + }, + "portworxVolume": { + "description": "portworxVolume represents a portworx volume attached and mounted on kubelet's host machine", + "$ref": "#/definitions/io.k8s.api.core.v1.PortworxVolumeSource" + }, + "quobyte": { + "description": "quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + "$ref": "#/definitions/io.k8s.api.core.v1.QuobyteVolumeSource" + }, + "rbd": { + "description": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", + "$ref": "#/definitions/io.k8s.api.core.v1.RBDPersistentVolumeSource" + }, + "scaleIO": { + "description": "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + "$ref": "#/definitions/io.k8s.api.core.v1.ScaleIOPersistentVolumeSource" + }, + "storageClassName": { + "description": "storageClassName is the name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass.", + "type": "string" + }, + "storageos": { + "description": "storageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod. More info: https://examples.k8s.io/volumes/storageos/README.md", + "$ref": "#/definitions/io.k8s.api.core.v1.StorageOSPersistentVolumeSource" + }, + "volumeAttributesClassName": { + "description": "Name of VolumeAttributesClass to which this persistent volume belongs. Empty value is not allowed. When this field is not set, it indicates that this volume does not belong to any VolumeAttributesClass. This field is mutable and can be changed by the CSI driver after a volume has been updated successfully to a new class. For an unbound PersistentVolume, the volumeAttributesClassName will be matched with unbound PersistentVolumeClaims during the binding process. This is an alpha field and requires enabling VolumeAttributesClass feature.", + "type": "string" + }, + "volumeMode": { + "description": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec.", + "type": "string" + }, + "vsphereVolume": { + "description": "vsphereVolume represents a vSphere volume attached and mounted on kubelet's host machine", + "$ref": "#/definitions/io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource" + } + } + }, + "io.k8s.api.core.v1.PersistentVolumeStatus": { + "description": "PersistentVolumeStatus is the current status of a persistent volume.", + "type": "object", + "properties": { + "lastPhaseTransitionTime": { + "description": "lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time every time a volume phase transitions. 
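For orientation, a hand-written PersistentVolume exercising a few of the PersistentVolumeSpec fields above might look like this; the name, size, class, and NFS server address are illustrative:

apiVersion: v1
kind: PersistentVolume
metadata:
  name: example-pv              # hypothetical name
spec:
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain   # default for manually created PVs
  storageClassName: manual
  mountOptions:                 # not validated; mount fails if invalid
    - hard
    - nfsvers=4.1
  nfs:                          # exactly one volume source per PV
    server: 10.0.0.10           # illustrative address
    path: /exports/data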
This is a beta field and requires the PersistentVolumeLastPhaseTransitionTime feature to be enabled (enabled by default).", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "message": { + "description": "message is a human-readable message indicating details about why the volume is in this state.", + "type": "string" + }, + "phase": { + "description": "phase indicates if a volume is available, bound to a claim, or released by a claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase", + "type": "string" + }, + "reason": { + "description": "reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.PhotonPersistentDiskVolumeSource": { + "description": "Represents a Photon Controller persistent disk resource.", + "type": "object", + "required": [ + "pdID" + ], + "properties": { + "fsType": { + "description": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "type": "string" + }, + "pdID": { + "description": "pdID is the ID that identifies Photon Controller persistent disk", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.Pod": { + "description": "Pod is a collection of containers that can run on a host. This resource is created by clients and scheduled onto hosts.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.core.v1.PodSpec" + }, + "status": { + "description": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.core.v1.PodStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "Pod", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.PodAffinity": { + "description": "Pod affinity is a group of inter pod affinity scheduling rules.", + "type": "object", + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "description": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.WeightedPodAffinityTerm" + } + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "description": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodAffinityTerm" + } + } + } + }, + "io.k8s.api.core.v1.PodAffinityTerm": { + "description": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key \u003ctopologyKey\u003e matches that of any node on which a pod of the set of pods is running", + "type": "object", + "required": [ + "topologyKey" + ], + "properties": { + "labelSelector": { + "description": "A label query over a set of resources, in this case pods. If it's null, this PodAffinityTerm matches with no Pods.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + }, + "matchLabelKeys": { + "description": "MatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key in (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. Also, MatchLabelKeys cannot be set when LabelSelector isn't set. 
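A sketch of the PodAffinity/PodAffinityTerm shape being described here, assuming an illustrative app=cache label on the pods to co-locate with:

apiVersion: v1
kind: Pod
metadata:
  name: with-affinity           # hypothetical name
spec:
  affinity:
    podAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              app: cache        # co-locate with pods labeled app=cache
          topologyKey: kubernetes.io/hostname   # "same node" topology
  containers:
    - name: app
      image: nginx              # placeholder image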
This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "atomic" + }, + "mismatchLabelKeys": { + "description": "MismatchLabelKeys is a set of pod label keys to select which pods will be taken into consideration. The keys are used to lookup values from the incoming pod labels, those key-value labels are merged with `LabelSelector` as `key notin (value)` to select the group of existing pods which pods will be taken into consideration for the incoming pod's pod (anti) affinity. Keys that don't exist in the incoming pod labels will be ignored. The default value is empty. The same key is forbidden to exist in both MismatchLabelKeys and LabelSelector. Also, MismatchLabelKeys cannot be set when LabelSelector isn't set. This is an alpha field and requires enabling MatchLabelKeysInPodAffinity feature gate.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "atomic" + }, + "namespaceSelector": { + "description": "A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means \"this pod's namespace\". An empty selector ({}) matches all namespaces.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + }, + "namespaces": { + "description": "namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means \"this pod's namespace\".", + "type": "array", + "items": { + "type": "string" + } + }, + "topologyKey": { + "description": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.PodAntiAffinity": { + "description": "Pod anti affinity is a group of inter pod anti affinity scheduling rules.", + "type": "object", + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "description": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.WeightedPodAffinityTerm" + } + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "description": "If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. 
If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodAffinityTerm" + } + } + } + }, + "io.k8s.api.core.v1.PodCondition": { + "description": "PodCondition contains details for the current condition of this pod.", + "type": "object", + "required": [ + "type", + "status" + ], + "properties": { + "lastProbeTime": { + "description": "Last time we probed the condition.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "lastTransitionTime": { + "description": "Last time the condition transitioned from one status to another.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "message": { + "description": "Human-readable message indicating details about last transition.", + "type": "string" + }, + "reason": { + "description": "Unique, one-word, CamelCase reason for the condition's last transition.", + "type": "string" + }, + "status": { + "description": "Status is the status of the condition. Can be True, False, Unknown. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", + "type": "string" + }, + "type": { + "description": "Type is the type of the condition. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.PodDNSConfig": { + "description": "PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy.", + "type": "object", + "properties": { + "nameservers": { + "description": "A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed.", + "type": "array", + "items": { + "type": "string" + } + }, + "options": { + "description": "A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodDNSConfigOption" + } + }, + "searches": { + "description": "A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "io.k8s.api.core.v1.PodDNSConfigOption": { + "description": "PodDNSConfigOption defines DNS resolver options of a pod.", + "type": "object", + "properties": { + "name": { + "description": "Required.", + "type": "string" + }, + "value": { + "type": "string" + } + } + }, + "io.k8s.api.core.v1.PodIP": { + "description": "PodIP represents a single IP address allocated to the pod.", + "type": "object", + "properties": { + "ip": { + "description": "IP is the IP address assigned to the pod", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.PodList": { + "description": "PodList is a list of Pods.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. 
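The PodDNSConfig and PodDNSConfigOption definitions above translate into a pod spec fragment like the following; the resolver address and search domain are placeholders:

spec:
  dnsPolicy: "None"             # use only the values from dnsConfig below
  dnsConfig:
    nameservers:
      - 1.2.3.4                 # illustrative resolver
    searches:
      - ns1.svc.cluster-domain.example
    options:
      - name: ndots
        value: "2"
      - name: edns0             # value is optional for flag-style options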
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of pods. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Pod" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "PodList", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.PodOS": { + "description": "PodOS defines the OS parameters of a pod.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "description": "Name is the name of the operating system. The currently supported values are linux and windows. Additional value may be defined in future and can be one of: https://github.com/opencontainers/runtime-spec/blob/master/config.md#platform-specific-configuration Clients should expect to handle additional values and treat unrecognized values in this field as os: null", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.PodReadinessGate": { + "description": "PodReadinessGate contains the reference to a pod condition", + "type": "object", + "required": [ + "conditionType" + ], + "properties": { + "conditionType": { + "description": "ConditionType refers to a condition in the pod's condition list with matching type.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.PodResourceClaim": { + "description": "PodResourceClaim references exactly one ResourceClaim through a ClaimSource. It adds a name to it that uniquely identifies the ResourceClaim inside the Pod. Containers that need access to the ResourceClaim reference it with this name.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "description": "Name uniquely identifies this resource claim inside the pod. This must be a DNS_LABEL.", + "type": "string" + }, + "source": { + "description": "Source describes where to find the ResourceClaim.", + "$ref": "#/definitions/io.k8s.api.core.v1.ClaimSource" + } + } + }, + "io.k8s.api.core.v1.PodResourceClaimStatus": { + "description": "PodResourceClaimStatus is stored in the PodStatus for each PodResourceClaim which references a ResourceClaimTemplate. It stores the generated name for the corresponding ResourceClaim.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "description": "Name uniquely identifies this resource claim inside the pod. This must match the name of an entry in pod.spec.resourceClaims, which implies that the string must be a DNS_LABEL.", + "type": "string" + }, + "resourceClaimName": { + "description": "ResourceClaimName is the name of the ResourceClaim that was generated for the Pod in the namespace of the Pod. 
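A sketch of the PodResourceClaim shape above, assuming the alpha DynamicResourceAllocation feature gate is enabled and a hypothetical ResourceClaimTemplate named gpu-template exists:

spec:
  resourceClaims:
    - name: gpu                 # referenced by containers below
      source:
        resourceClaimTemplateName: gpu-template   # hypothetical template
  containers:
    - name: app
      image: nginx              # placeholder image
      resources:
        claims:
          - name: gpu           # grants this container access to the claim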
If this is unset, then generating a ResourceClaim was not necessary. The pod.spec.resourceClaims entry can be ignored in this case.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.PodSchedulingGate": { + "description": "PodSchedulingGate is associated with a Pod to guard its scheduling.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "description": "Name of the scheduling gate. Each scheduling gate must have a unique name field.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.PodSecurityContext": { + "description": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.", + "type": "object", + "properties": { + "fsGroup": { + "description": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw----\n\nIf unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows.", + "type": "integer", + "format": "int64" + }, + "fsGroupChangePolicy": { + "description": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership (and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified, \"Always\" is used. Note that this field cannot be set when spec.os.name is windows.", + "type": "string" + }, + "runAsGroup": { + "description": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", + "type": "integer", + "format": "int64" + }, + "runAsNonRoot": { + "description": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "type": "boolean" + }, + "runAsUser": { + "description": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", + "type": "integer", + "format": "int64" + }, + "seLinuxOptions": { + "description": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. 
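A minimal sketch of the PodSchedulingGate list described above; the gate name is a placeholder that a hypothetical external controller would remove to release the pod for scheduling:

spec:
  schedulingGates:
    - name: example.com/quota-check   # hypothetical gate name
  containers:
    - name: app
      image: nginx                    # placeholder image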
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container. Note that this field cannot be set when spec.os.name is windows.", + "$ref": "#/definitions/io.k8s.api.core.v1.SELinuxOptions" + }, + "seccompProfile": { + "description": "The seccomp options to use by the containers in this pod. Note that this field cannot be set when spec.os.name is windows.", + "$ref": "#/definitions/io.k8s.api.core.v1.SeccompProfile" + }, + "supplementalGroups": { + "description": "A list of groups applied to the first process run in each container, in addition to the container's primary GID, the fsGroup (if specified), and group memberships defined in the container image for the uid of the container process. If unspecified, no additional groups are added to any container. Note that group memberships defined in the container image for the uid of the container process are still effective, even if they are not included in this list. Note that this field cannot be set when spec.os.name is windows.", + "type": "array", + "items": { + "type": "integer", + "format": "int64" + } + }, + "sysctls": { + "description": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch. Note that this field cannot be set when spec.os.name is windows.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Sysctl" + } + }, + "windowsOptions": { + "description": "The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is linux.", + "$ref": "#/definitions/io.k8s.api.core.v1.WindowsSecurityContextOptions" + } + } + }, + "io.k8s.api.core.v1.PodSpec": { + "description": "PodSpec is a description of a pod.", + "type": "object", + "required": [ + "containers" + ], + "properties": { + "activeDeadlineSeconds": { + "description": "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.", + "type": "integer", + "format": "int64" + }, + "affinity": { + "description": "If specified, the pod's scheduling constraints", + "$ref": "#/definitions/io.k8s.api.core.v1.Affinity" + }, + "automountServiceAccountToken": { + "description": "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.", + "type": "boolean" + }, + "containers": { + "description": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Container" + }, + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "dnsConfig": { + "description": "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", + "$ref": "#/definitions/io.k8s.api.core.v1.PodDNSConfig" + }, + "dnsPolicy": { + "description": "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. 
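Putting a few of these PodSecurityContext fields together, a pod-level security context fragment might look like this; the UID/GID values are illustrative:

spec:
  securityContext:
    runAsNonRoot: true
    runAsUser: 1000               # illustrative UID
    fsGroup: 2000                 # volumes get this owning GID where supported
    seccompProfile:
      type: RuntimeDefault
  containers:
    - name: app
      image: nginx                # placeholder image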
DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.", + "type": "string" + }, + "enableServiceLinks": { + "description": "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.", + "type": "boolean" + }, + "ephemeralContainers": { + "description": "List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.EphemeralContainer" + }, + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "hostAliases": { + "description": "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.HostAlias" + }, + "x-kubernetes-patch-merge-key": "ip", + "x-kubernetes-patch-strategy": "merge" + }, + "hostIPC": { + "description": "Use the host's ipc namespace. Optional: Default to false.", + "type": "boolean" + }, + "hostNetwork": { + "description": "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.", + "type": "boolean" + }, + "hostPID": { + "description": "Use the host's pid namespace. Optional: Default to false.", + "type": "boolean" + }, + "hostUsers": { + "description": "Use the host's user namespace. Optional: Default to true. If set to true or not present, the pod will be run in the host user namespace, useful for when the pod needs a feature only available to the host user namespace, such as loading a kernel module with CAP_SYS_MODULE. When set to false, a new userns is created for the pod. Setting false is useful for mitigating container breakout vulnerabilities even allowing users to run their containers as root without actually having root privileges on the host. This field is alpha-level and is only honored by servers that enable the UserNamespacesSupport feature.", + "type": "boolean" + }, + "hostname": { + "description": "Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.", + "type": "string" + }, + "imagePullSecrets": { + "description": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" + }, + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "initContainers": { + "description": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. 
If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Container" + }, + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "nodeName": { + "description": "NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.", + "type": "string" + }, + "nodeSelector": { + "description": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", + "type": "object", + "additionalProperties": { + "type": "string" + }, + "x-kubernetes-map-type": "atomic" + }, + "os": { + "description": "Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set.\n\nIf the OS field is set to linux, the following fields must be unset: - securityContext.windowsOptions\n\nIf the OS field is set to windows, the following fields must be unset: - spec.hostPID - spec.hostIPC - spec.hostUsers - spec.securityContext.seLinuxOptions - spec.securityContext.seccompProfile - spec.securityContext.fsGroup - spec.securityContext.fsGroupChangePolicy - spec.securityContext.sysctls - spec.shareProcessNamespace - spec.securityContext.runAsUser - spec.securityContext.runAsGroup - spec.securityContext.supplementalGroups - spec.containers[*].securityContext.seLinuxOptions - spec.containers[*].securityContext.seccompProfile - spec.containers[*].securityContext.capabilities - spec.containers[*].securityContext.readOnlyRootFilesystem - spec.containers[*].securityContext.privileged - spec.containers[*].securityContext.allowPrivilegeEscalation - spec.containers[*].securityContext.procMount - spec.containers[*].securityContext.runAsUser - spec.containers[*].securityContext.runAsGroup", + "$ref": "#/definitions/io.k8s.api.core.v1.PodOS" + }, + "overhead": { + "description": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. 
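As a sketch of the initContainers and nodeSelector fields above; the image, node label, and wait command are illustrative:

spec:
  nodeSelector:
    disktype: ssd               # illustrative node label
  initContainers:
    - name: wait-for-db         # runs to completion before app containers start
      image: busybox:1.36
      command: ["sh", "-c", "until nc -z db 5432; do sleep 2; done"]
  containers:
    - name: app
      image: nginx              # placeholder image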
More info: https://git.k8s.io/enhancements/keps/sig-node/688-pod-overhead/README.md", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + } + }, + "preemptionPolicy": { + "description": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.", + "type": "string" + }, + "priority": { + "description": "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.", + "type": "integer", + "format": "int32" + }, + "priorityClassName": { + "description": "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", + "type": "string" + }, + "readinessGates": { + "description": "If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://git.k8s.io/enhancements/keps/sig-network/580-pod-readiness-gates", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodReadinessGate" + } + }, + "resourceClaims": { + "description": "ResourceClaims defines which ResourceClaims must be allocated and reserved before the Pod is allowed to start. The resources will be made available to those containers which consume them by name.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodResourceClaim" + }, + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys" + }, + "restartPolicy": { + "description": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. In some contexts, only a subset of those values may be permitted. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy", + "type": "string" + }, + "runtimeClassName": { + "description": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/585-runtime-class", + "type": "string" + }, + "schedulerName": { + "description": "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.", + "type": "string" + }, + "schedulingGates": { + "description": "SchedulingGates is an opaque list of values that if specified will block scheduling the pod. 
If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod.\n\nSchedulingGates can only be set at pod creation time, and be removed only afterwards.\n\nThis is a beta feature enabled by the PodSchedulingReadiness feature gate.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodSchedulingGate" + }, + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + }, + "securityContext": { + "description": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.", + "$ref": "#/definitions/io.k8s.api.core.v1.PodSecurityContext" + }, + "serviceAccount": { + "description": "DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.", + "type": "string" + }, + "serviceAccountName": { + "description": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", + "type": "string" + }, + "setHostnameAsFQDN": { + "description": "If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false.", + "type": "boolean" + }, + "shareProcessNamespace": { + "description": "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false.", + "type": "boolean" + }, + "subdomain": { + "description": "If specified, the fully qualified Pod hostname will be \"\u003chostname\u003e.\u003csubdomain\u003e.\u003cpod namespace\u003e.svc.\u003ccluster domain\u003e\". If not specified, the pod will not have a domainname at all.", + "type": "string" + }, + "terminationGracePeriodSeconds": { + "description": "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", + "type": "integer", + "format": "int64" + }, + "tolerations": { + "description": "If specified, the pod's tolerations.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Toleration" + } + }, + "topologySpreadConstraints": { + "description": "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. 
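A fragment combining the tolerations and topologySpreadConstraints fields above; the taint key and app label are placeholders:

spec:
  tolerations:
    - key: "example.com/dedicated"    # hypothetical taint key
      operator: "Equal"
      value: "batch"
      effect: "NoSchedule"
  topologySpreadConstraints:
    - maxSkew: 1
      topologyKey: topology.kubernetes.io/zone
      whenUnsatisfiable: DoNotSchedule
      labelSelector:
        matchLabels:
          app: web                    # illustrative label
  containers:
    - name: app
      image: nginx                    # placeholder image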
Scheduler will schedule pods in a way which abides by the constraints. All topologySpreadConstraints are ANDed.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.TopologySpreadConstraint" + }, + "x-kubernetes-list-map-keys": [ + "topologyKey", + "whenUnsatisfiable" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "topologyKey", + "x-kubernetes-patch-strategy": "merge" + }, + "volumes": { + "description": "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Volume" + }, + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys" + } + } + }, + "io.k8s.api.core.v1.PodStatus": { + "description": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.", + "type": "object", + "properties": { + "conditions": { + "description": "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodCondition" + }, + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "containerStatuses": { + "description": "The list has one entry per container in the manifest. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerStatus" + } + }, + "ephemeralContainerStatuses": { + "description": "Status for any ephemeral containers that have run in this pod.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerStatus" + } + }, + "hostIP": { + "description": "hostIP holds the IP address of the host to which the pod is assigned. Empty if the pod has not started yet. A pod can be assigned to a node that has a problem in the kubelet, which in turn means that HostIP will not be updated even if a node is assigned to the pod", + "type": "string" + }, + "hostIPs": { + "description": "hostIPs holds the IP addresses allocated to the host. If this field is specified, the first entry must match the hostIP field. This list is empty if the pod has not started yet. A pod can be assigned to a node that has a problem in the kubelet, which in turn means that HostIPs will not be updated even if a node is assigned to this pod.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.HostIP" + }, + "x-kubernetes-list-type": "atomic", + "x-kubernetes-patch-merge-key": "ip", + "x-kubernetes-patch-strategy": "merge" + }, + "initContainerStatuses": { + "description": "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ContainerStatus" + } + }, + "message": { + "description": "A human readable message indicating details about why the pod is in this condition.", + "type": "string" + }, + "nominatedNodeName": { + "description": "nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.", + "type": "string" + }, + "phase": { + "description": "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase", + "type": "string" + }, + "podIP": { + "description": "podIP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.", + "type": "string" + }, + "podIPs": { + "description": "podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodIP" + }, + "x-kubernetes-patch-merge-key": "ip", + "x-kubernetes-patch-strategy": "merge" + }, + "qosClass": { + "description": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-qos/#quality-of-service-classes", + "type": "string" + }, + "reason": { + "description": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'", + "type": "string" + }, + "resize": { + "description": "Status of resources resize desired for pod's containers. It is empty if no resources resize is pending. 
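For orientation, an observed status stanza shaped by the PodStatus fields above might look like the following; it is populated by the system, never written by clients, and all values here are illustrative:

status:
  phase: Running
  qosClass: Burstable
  podIP: 10.244.1.7             # illustrative addresses
  podIPs:
    - ip: 10.244.1.7
  conditions:
    - type: Ready
      status: "True"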
Any changes to container resources will automatically set this to \"Proposed\"", + "type": "string" + }, + "resourceClaimStatuses": { + "description": "Status of resource claims.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodResourceClaimStatus" + }, + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge,retainKeys" + }, + "startTime": { + "description": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + } + } + }, + "io.k8s.api.core.v1.PodTemplate": { + "description": "PodTemplate describes a template for creating copies of a predefined pod.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "template": { + "description": "Template defines the pods that will be created from this pod template. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.core.v1.PodTemplateSpec" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "PodTemplate", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.PodTemplateList": { + "description": "PodTemplateList is a list of PodTemplates.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of pod templates", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.PodTemplate" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata. 
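A minimal PodTemplate object matching the definition above; the name, label, and image are placeholders:

apiVersion: v1
kind: PodTemplate
metadata:
  name: example-template        # hypothetical name
template:                       # a PodTemplateSpec: metadata plus a PodSpec
  metadata:
    labels:
      app: example
  spec:
    containers:
      - name: app
        image: nginx            # placeholder image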
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "PodTemplateList", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.PodTemplateSpec": { + "description": "PodTemplateSpec describes the data a pod should have when created from a template", + "type": "object", + "properties": { + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.core.v1.PodSpec" + } + } + }, + "io.k8s.api.core.v1.PortStatus": { + "type": "object", + "required": [ + "port", + "protocol" + ], + "properties": { + "error": { + "description": "Error is to record the problem with the service port The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.", + "type": "string" + }, + "port": { + "description": "Port is the port number of the service port of which status is recorded here", + "type": "integer", + "format": "int32" + }, + "protocol": { + "description": "Protocol is the protocol of the service port of which status is recorded here The supported values are: \"TCP\", \"UDP\", \"SCTP\"", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.PortworxVolumeSource": { + "description": "PortworxVolumeSource represents a Portworx volume resource.", + "type": "object", + "required": [ + "volumeID" + ], + "properties": { + "fsType": { + "description": "fSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "type": "string" + }, + "readOnly": { + "description": "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "type": "boolean" + }, + "volumeID": { + "description": "volumeID uniquely identifies a Portworx volume", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.PreferredSchedulingTerm": { + "description": "An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. 
is also a no-op).", + "type": "object", + "required": [ + "weight", + "preference" + ], + "properties": { + "preference": { + "description": "A node selector term, associated with the corresponding weight.", + "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelectorTerm" + }, + "weight": { + "description": "Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.", + "type": "integer", + "format": "int32" + } + } + }, + "io.k8s.api.core.v1.Probe": { + "description": "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.", + "type": "object", + "properties": { + "exec": { + "description": "Exec specifies the action to take.", + "$ref": "#/definitions/io.k8s.api.core.v1.ExecAction" + }, + "failureThreshold": { + "description": "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.", + "type": "integer", + "format": "int32" + }, + "grpc": { + "description": "GRPC specifies an action involving a GRPC port.", + "$ref": "#/definitions/io.k8s.api.core.v1.GRPCAction" + }, + "httpGet": { + "description": "HTTPGet specifies the http request to perform.", + "$ref": "#/definitions/io.k8s.api.core.v1.HTTPGetAction" + }, + "initialDelaySeconds": { + "description": "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "type": "integer", + "format": "int32" + }, + "periodSeconds": { + "description": "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.", + "type": "integer", + "format": "int32" + }, + "successThreshold": { + "description": "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.", + "type": "integer", + "format": "int32" + }, + "tcpSocket": { + "description": "TCPSocket specifies an action involving a TCP port.", + "$ref": "#/definitions/io.k8s.api.core.v1.TCPSocketAction" + }, + "terminationGracePeriodSeconds": { + "description": "Optional duration in seconds the pod needs to terminate gracefully upon probe failure. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this value overrides the value provided by the pod spec. Value must be non-negative integer. The value zero indicates stop immediately via the kill signal (no opportunity to shut down). This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset.", + "type": "integer", + "format": "int64" + }, + "timeoutSeconds": { + "description": "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "type": "integer", + "format": "int32" + } + } + }, + "io.k8s.api.core.v1.ProjectedVolumeSource": { + "description": "Represents a projected volume source", + "type": "object", + "properties": { + "defaultMode": { + "description": "defaultMode are the mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "type": "integer", + "format": "int32" + }, + "sources": { + "description": "sources is the list of volume projections", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.VolumeProjection" + } + } + } + }, + "io.k8s.api.core.v1.QuobyteVolumeSource": { + "description": "Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling.", + "type": "object", + "required": [ + "registry", + "volume" + ], + "properties": { + "group": { + "description": "group to map volume access to Default is no group", + "type": "string" + }, + "readOnly": { + "description": "readOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.", + "type": "boolean" + }, + "registry": { + "description": "registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes", + "type": "string" + }, + "tenant": { + "description": "tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin", + "type": "string" + }, + "user": { + "description": "user to map volume access to Defaults to serviceaccount user", + "type": "string" + }, + "volume": { + "description": "volume is a string that references an already created Quobyte volume by name.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.RBDPersistentVolumeSource": { + "description": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.", + "type": "object", + "required": [ + "monitors", + "image" + ], + "properties": { + "fsType": { + "description": "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd", + "type": "string" + }, + "image": { + "description": "image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "type": "string" + }, + "keyring": { + "description": "keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "type": "string" + }, + "monitors": { + "description": "monitors is a collection of Ceph monitors. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "type": "array", + "items": { + "type": "string" + } + }, + "pool": { + "description": "pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "type": "string" + }, + "readOnly": { + "description": "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "type": "boolean" + }, + "secretRef": { + "description": "secretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretReference" + }, + "user": { + "description": "user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.RBDVolumeSource": { + "description": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.", + "type": "object", + "required": [ + "monitors", + "image" + ], + "properties": { + "fsType": { + "description": "fsType is the filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd", + "type": "string" + }, + "image": { + "description": "image is the rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "type": "string" + }, + "keyring": { + "description": "keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "type": "string" + }, + "monitors": { + "description": "monitors is a collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "type": "array", + "items": { + "type": "string" + } + }, + "pool": { + "description": "pool is the rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "type": "string" + }, + "readOnly": { + "description": "readOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "type": "boolean" + }, + "secretRef": { + "description": "secretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" + }, + "user": { + "description": "user is the rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.ReplicationController": { + "description": "ReplicationController represents the configuration of a replication controller.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "Spec defines the specification of the desired behavior of the replication controller. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.core.v1.ReplicationControllerSpec" + }, + "status": { + "description": "Status is the most recently observed status of the replication controller. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.core.v1.ReplicationControllerStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "ReplicationController", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.ReplicationControllerCondition": { + "description": "ReplicationControllerCondition describes the state of a replication controller at a certain point.", + "type": "object", + "required": [ + "type", + "status" + ], + "properties": { + "lastTransitionTime": { + "description": "The last time the condition transitioned from one status to another.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "message": { + "description": "A human readable message indicating details about the transition.", + "type": "string" + }, + "reason": { + "description": "The reason for the condition's last transition.", + "type": "string" + }, + "status": { + "description": "Status of the condition, one of True, False, Unknown.", + "type": "string" + }, + "type": { + "description": "Type of replication controller condition.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.ReplicationControllerList": { + "description": "ReplicationControllerList is a collection of replication controllers.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of replication controllers. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ReplicationController" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. 
Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "ReplicationControllerList", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.ReplicationControllerSpec": { + "description": "ReplicationControllerSpec is the specification of a replication controller.", + "type": "object", + "properties": { + "minReadySeconds": { + "description": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "type": "integer", + "format": "int32" + }, + "replicas": { + "description": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller", + "type": "integer", + "format": "int32" + }, + "selector": { + "description": "Selector is a label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "type": "object", + "additionalProperties": { + "type": "string" + }, + "x-kubernetes-map-type": "atomic" + }, + "template": { + "description": "Template is the object that describes the pod that will be created if insufficient replicas are detected. This takes precedence over a TemplateRef. The only allowed template.spec.restartPolicy value is \"Always\". 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", + "$ref": "#/definitions/io.k8s.api.core.v1.PodTemplateSpec" + } + } + }, + "io.k8s.api.core.v1.ReplicationControllerStatus": { + "description": "ReplicationControllerStatus represents the current status of a replication controller.", + "type": "object", + "required": [ + "replicas" + ], + "properties": { + "availableReplicas": { + "description": "The number of available replicas (ready for at least minReadySeconds) for this replication controller.", + "type": "integer", + "format": "int32" + }, + "conditions": { + "description": "Represents the latest available observations of a replication controller's current state.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ReplicationControllerCondition" + }, + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "fullyLabeledReplicas": { + "description": "The number of pods that have labels matching the labels of the pod template of the replication controller.", + "type": "integer", + "format": "int32" + }, + "observedGeneration": { + "description": "ObservedGeneration reflects the generation of the most recently observed replication controller.", + "type": "integer", + "format": "int64" + }, + "readyReplicas": { + "description": "The number of ready replicas for this replication controller.", + "type": "integer", + "format": "int32" + }, + "replicas": { + "description": "Replicas is the most recently observed number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller", + "type": "integer", + "format": "int32" + } + } + }, + "io.k8s.api.core.v1.ResourceClaim": { + "description": "ResourceClaim references one entry in PodSpec.ResourceClaims.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "description": "Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.ResourceFieldSelector": { + "description": "ResourceFieldSelector represents container resources (cpu, memory) and their output format", + "type": "object", + "required": [ + "resource" + ], + "properties": { + "containerName": { + "description": "Container name: required for volumes, optional for env vars", + "type": "string" + }, + "divisor": { + "description": "Specifies the output format of the exposed resources, defaults to \"1\"", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + }, + "resource": { + "description": "Required: resource to select", + "type": "string" + } + }, + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.ResourceQuota": { + "description": "ResourceQuota sets aggregate quota restrictions enforced per namespace", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. 
In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "Spec defines the desired quota. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.core.v1.ResourceQuotaSpec" + }, + "status": { + "description": "Status defines the actual enforced quota and its current usage. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.core.v1.ResourceQuotaStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "ResourceQuota", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.ResourceQuotaList": { + "description": "ResourceQuotaList is a list of ResourceQuota items.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is a list of ResourceQuota objects. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ResourceQuota" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "ResourceQuotaList", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.ResourceQuotaSpec": { + "description": "ResourceQuotaSpec defines the desired hard limits to enforce for Quota.", + "type": "object", + "properties": { + "hard": { + "description": "hard is the set of desired hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + } + }, + "scopeSelector": { + "description": "scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota but expressed using ScopeSelectorOperator in combination with possible values. For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched.", + "$ref": "#/definitions/io.k8s.api.core.v1.ScopeSelector" + }, + "scopes": { + "description": "A collection of filters that must match each object tracked by a quota. 
If not specified, the quota matches all objects.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "io.k8s.api.core.v1.ResourceQuotaStatus": { + "description": "ResourceQuotaStatus defines the enforced hard limits and observed use.", + "type": "object", + "properties": { + "hard": { + "description": "Hard is the set of enforced hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + } + }, + "used": { + "description": "Used is the current observed total usage of the resource in the namespace.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + } + } + } + }, + "io.k8s.api.core.v1.ResourceRequirements": { + "description": "ResourceRequirements describes the compute resource requirements.", + "type": "object", + "properties": { + "claims": { + "description": "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container.\n\nThis is an alpha field and requires enabling the DynamicResourceAllocation feature gate.\n\nThis field is immutable. It can only be set for containers.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ResourceClaim" + }, + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map" + }, + "limits": { + "description": "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + } + }, + "requests": { + "description": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + } + } + } + }, + "io.k8s.api.core.v1.SELinuxOptions": { + "description": "SELinuxOptions are the labels to be applied to the container", + "type": "object", + "properties": { + "level": { + "description": "Level is SELinux level label that applies to the container.", + "type": "string" + }, + "role": { + "description": "Role is a SELinux role label that applies to the container.", + "type": "string" + }, + "type": { + "description": "Type is a SELinux type label that applies to the container.", + "type": "string" + }, + "user": { + "description": "User is a SELinux user label that applies to the container.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.ScaleIOPersistentVolumeSource": { + "description": "ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume", + "type": "object", + "required": [ + "gateway", + "system", + "secretRef" + ], + "properties": { + "fsType": { + "description": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". 
Default is \"xfs\"", + "type": "string" + }, + "gateway": { + "description": "gateway is the host address of the ScaleIO API Gateway.", + "type": "string" + }, + "protectionDomain": { + "description": "protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.", + "type": "string" + }, + "readOnly": { + "description": "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "type": "boolean" + }, + "secretRef": { + "description": "secretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretReference" + }, + "sslEnabled": { + "description": "sslEnabled is the flag to enable/disable SSL communication with Gateway, default false", + "type": "boolean" + }, + "storageMode": { + "description": "storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.", + "type": "string" + }, + "storagePool": { + "description": "storagePool is the ScaleIO Storage Pool associated with the protection domain.", + "type": "string" + }, + "system": { + "description": "system is the name of the storage system as configured in ScaleIO.", + "type": "string" + }, + "volumeName": { + "description": "volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.ScaleIOVolumeSource": { + "description": "ScaleIOVolumeSource represents a persistent ScaleIO volume", + "type": "object", + "required": [ + "gateway", + "system", + "secretRef" + ], + "properties": { + "fsType": { + "description": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\".", + "type": "string" + }, + "gateway": { + "description": "gateway is the host address of the ScaleIO API Gateway.", + "type": "string" + }, + "protectionDomain": { + "description": "protectionDomain is the name of the ScaleIO Protection Domain for the configured storage.", + "type": "string" + }, + "readOnly": { + "description": "readOnly Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "type": "boolean" + }, + "secretRef": { + "description": "secretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.", + "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" + }, + "sslEnabled": { + "description": "sslEnabled Flag enable/disable SSL communication with Gateway, default false", + "type": "boolean" + }, + "storageMode": { + "description": "storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. 
Default is ThinProvisioned.", + "type": "string" + }, + "storagePool": { + "description": "storagePool is the ScaleIO Storage Pool associated with the protection domain.", + "type": "string" + }, + "system": { + "description": "system is the name of the storage system as configured in ScaleIO.", + "type": "string" + }, + "volumeName": { + "description": "volumeName is the name of a volume already created in the ScaleIO system that is associated with this volume source.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.ScopeSelector": { + "description": "A scope selector represents the AND of the selectors represented by the scoped-resource selector requirements.", + "type": "object", + "properties": { + "matchExpressions": { + "description": "A list of scope selector requirements by scope of the resources.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ScopedResourceSelectorRequirement" + } + } + }, + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.ScopedResourceSelectorRequirement": { + "description": "A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator that relates the scope name and values.", + "type": "object", + "required": [ + "scopeName", + "operator" + ], + "properties": { + "operator": { + "description": "Represents a scope's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist.", + "type": "string" + }, + "scopeName": { + "description": "The name of the scope that the selector applies to.", + "type": "string" + }, + "values": { + "description": "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "io.k8s.api.core.v1.SeccompProfile": { + "description": "SeccompProfile defines a pod/container's seccomp profile settings. Only one profile source may be set.", + "type": "object", + "required": [ + "type" + ], + "properties": { + "localhostProfile": { + "description": "localhostProfile indicates a profile defined in a file on the node should be used. The profile must be preconfigured on the node to work. Must be a descending path, relative to the kubelet's configured seccomp profile location. Must be set if type is \"Localhost\". Must NOT be set for any other type.", + "type": "string" + }, + "type": { + "description": "type indicates which kind of seccomp profile will be applied. Valid options are:\n\nLocalhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied.", + "type": "string" + } + }, + "x-kubernetes-unions": [ + { + "discriminator": "type", + "fields-to-discriminateBy": { + "localhostProfile": "LocalhostProfile" + } + } + ] + }, + "io.k8s.api.core.v1.Secret": { + "description": "Secret holds secret data of a certain type. The total bytes of the values in the Data field must be less than MaxSecretSize bytes.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "data": { + "description": "Data contains the secret data. Each key must consist of alphanumeric characters, '-', '_' or '.'. The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. Described in https://tools.ietf.org/html/rfc4648#section-4", + "type": "object", + "additionalProperties": { + "type": "string", + "format": "byte" + } + }, + "immutable": { + "description": "Immutable, if set to true, ensures that data stored in the Secret cannot be updated (only object metadata can be modified). If not set to true, the field can be modified at any time. Defaulted to nil.", + "type": "boolean" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "stringData": { + "description": "stringData allows specifying non-binary secret data in string form. It is provided as a write-only input field for convenience. All keys and values are merged into the data field on write, overwriting any existing values. The stringData field is never output when reading from the API.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "type": { + "description": "Used to facilitate programmatic handling of secret data. More info: https://kubernetes.io/docs/concepts/configuration/secret/#secret-types", + "type": "string" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "Secret", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.SecretEnvSource": { + "description": "SecretEnvSource selects a Secret to populate the environment variables with.\n\nThe contents of the target Secret's Data field will represent the key-value pairs as environment variables.", + "type": "object", + "properties": { + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": "string" + }, + "optional": { + "description": "Specify whether the Secret must be defined", + "type": "boolean" + } + } + }, + "io.k8s.api.core.v1.SecretKeySelector": { + "description": "SecretKeySelector selects a key of a Secret.", + "type": "object", + "required": [ + "key" + ], + "properties": { + "key": { + "description": "The key of the secret to select from. Must be a valid secret key.", + "type": "string" + }, + "name": { + "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": "string" + }, + "optional": { + "description": "Specify whether the Secret or its key must be defined", + "type": "boolean" + } + }, + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.SecretList": { + "description": "SecretList is a list of Secret.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is a list of secret objects. More info: https://kubernetes.io/docs/concepts/configuration/secret", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Secret" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "SecretList", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.SecretProjection": { + "description": "Adapts a secret into a projected volume.\n\nThe contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode.", + "type": "object", + "properties": { + "items": { + "description": "items if unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.KeyToPath" + } + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": "string" + }, + "optional": { + "description": "optional field specifies whether the Secret or its key must be defined", + "type": "boolean" + } + } + }, + "io.k8s.api.core.v1.SecretReference": { + "description": "SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace", + "type": "object", + "properties": { + "name": { + "description": "name is unique within a namespace to reference a secret resource.", + "type": "string" + }, + "namespace": { + "description": "namespace defines the space within which the secret name must be unique.", + "type": "string" + } + }, + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.SecretVolumeSource": { + "description": "Adapts a Secret into a volume.\n\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.", + "type": "object", + "properties": { + "defaultMode": { + "description": "defaultMode is Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "type": "integer", + "format": "int32" + }, + "items": { + "description": "items If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.KeyToPath" + } + }, + "optional": { + "description": "optional field specify whether the Secret or its keys must be defined", + "type": "boolean" + }, + "secretName": { + "description": "secretName is the name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.SecurityContext": { + "description": "SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.", + "type": "object", + "properties": { + "allowPrivilegeEscalation": { + "description": "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN Note that this field cannot be set when spec.os.name is windows.", + "type": "boolean" + }, + "capabilities": { + "description": "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime. Note that this field cannot be set when spec.os.name is windows.", + "$ref": "#/definitions/io.k8s.api.core.v1.Capabilities" + }, + "privileged": { + "description": "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false. 
Note that this field cannot be set when spec.os.name is windows.", + "type": "boolean" + }, + "procMount": { + "description": "procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled. Note that this field cannot be set when spec.os.name is windows.", + "type": "string" + }, + "readOnlyRootFilesystem": { + "description": "Whether this container has a read-only root filesystem. Default is false. Note that this field cannot be set when spec.os.name is windows.", + "type": "boolean" + }, + "runAsGroup": { + "description": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.", + "type": "integer", + "format": "int64" + }, + "runAsNonRoot": { + "description": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "type": "boolean" + }, + "runAsUser": { + "description": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.", + "type": "integer", + "format": "int64" + }, + "seLinuxOptions": { + "description": "The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. Note that this field cannot be set when spec.os.name is windows.", + "$ref": "#/definitions/io.k8s.api.core.v1.SELinuxOptions" + }, + "seccompProfile": { + "description": "The seccomp options to use by this container. If seccomp options are provided at both the pod \u0026 container level, the container options override the pod options. Note that this field cannot be set when spec.os.name is windows.", + "$ref": "#/definitions/io.k8s.api.core.v1.SeccompProfile" + }, + "windowsOptions": { + "description": "The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. 
Note that this field cannot be set when spec.os.name is linux.", + "$ref": "#/definitions/io.k8s.api.core.v1.WindowsSecurityContextOptions" + } + } + }, + "io.k8s.api.core.v1.Service": { + "description": "Service is a named abstraction of software service (for example, mysql) consisting of local port (for example 3306) that the proxy listens on, and the selector that determines which pods will answer requests sent through the proxy.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "Spec defines the behavior of a service. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.core.v1.ServiceSpec" + }, + "status": { + "description": "Most recently observed status of the service. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.core.v1.ServiceStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "Service", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.ServiceAccount": { + "description": "ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "automountServiceAccountToken": { + "description": "AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. Can be overridden at the pod level.", + "type": "boolean" + }, + "imagePullSecrets": { + "description": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. 
More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "secrets": { + "description": "Secrets is a list of the secrets in the same namespace that pods running using this ServiceAccount are allowed to use. Pods are only limited to this list if this service account has a \"kubernetes.io/enforce-mountable-secrets\" annotation set to \"true\". This field should not be used to find auto-generated service account token secrets for use outside of pods. Instead, tokens can be requested directly using the TokenRequest API, or service account token secrets can be manually created. More info: https://kubernetes.io/docs/concepts/configuration/secret", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference" + }, + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "ServiceAccount", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.ServiceAccountList": { + "description": "ServiceAccountList is a list of ServiceAccount objects", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of ServiceAccounts. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ServiceAccount" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "ServiceAccountList", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.ServiceAccountTokenProjection": { + "description": "ServiceAccountTokenProjection represents a projected service account token volume. 
This projection can be used to insert a service account token into the pod's runtime filesystem for use against APIs (Kubernetes API Server or otherwise).", + "type": "object", + "required": [ + "path" + ], + "properties": { + "audience": { + "description": "audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.", + "type": "string" + }, + "expirationSeconds": { + "description": "expirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours. Defaults to 1 hour and must be at least 10 minutes.", + "type": "integer", + "format": "int64" + }, + "path": { + "description": "path is the path relative to the mount point of the file to project the token into.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.ServiceList": { + "description": "ServiceList holds a list of services.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "List of services", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Service" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "ServiceList", + "version": "v1" + } + ] + }, + "io.k8s.api.core.v1.ServicePort": { + "description": "ServicePort contains information on service's port.", + "type": "object", + "required": [ + "port" + ], + "properties": { + "appProtocol": { + "description": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. 
Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", + "type": "string" + }, + "name": { + "description": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.", + "type": "string" + }, + "nodePort": { + "description": "The port on each node on which this service is exposed when type is NodePort or LoadBalancer. Usually assigned by the system. If a value is specified, in-range, and not in use it will be used, otherwise the operation will fail. If not specified, a port will be allocated if this Service requires one. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type from NodePort to ClusterIP). More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport", + "type": "integer", + "format": "int32" + }, + "port": { + "description": "The port that will be exposed by this service.", + "type": "integer", + "format": "int32" + }, + "protocol": { + "description": "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.", + "type": "string" + }, + "targetPort": { + "description": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString" + } + } + }, + "io.k8s.api.core.v1.ServiceSpec": { + "description": "ServiceSpec describes the attributes that a user creates on a service.", + "type": "object", + "properties": { + "allocateLoadBalancerNodePorts": { + "description": "allocateLoadBalancerNodePorts defines if NodePorts will be automatically allocated for services with type LoadBalancer. Default is \"true\". It may be set to \"false\" if the cluster load-balancer does not rely on NodePorts. If the caller requests specific NodePorts (by specifying a value), those requests will be respected, regardless of this field. This field may only be set for services with type LoadBalancer and will be cleared if the type is changed to any other type.", + "type": "boolean" + }, + "clusterIP": { + "description": "clusterIP is the IP address of the service and is usually assigned randomly. 
If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be blank) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as described above). Valid values are \"None\", empty string (\"\"), or a valid IP address. Setting this to \"None\" makes a \"headless service\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "type": "string" + }, + "clusterIPs": { + "description": "ClusterIPs is a list of IP addresses assigned to this service, and they are usually assigned randomly. If an address is specified manually, is in-range (as per system configuration), and is not in use, it will be allocated to the service; otherwise creation of the service will fail. This field may not be changed through updates unless the type field is also being changed to ExternalName (which requires this field to be empty) or the type field is being changed from ExternalName (in which case this field may optionally be specified, as described above). Valid values are \"None\", empty string (\"\"), or a valid IP address. Setting this to \"None\" makes a \"headless service\" (no virtual IP), which is useful when direct endpoint connections are preferred and proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. If this field is specified when creating a Service of type ExternalName, creation will fail. This field will be wiped when updating a Service to type ExternalName. If this field is not specified, it will be initialized from the clusterIP field. If this field is specified, clients must ensure that clusterIPs[0] and clusterIP have the same value.\n\nThis field may hold a maximum of two entries (dual-stack IPs, in either order). These IPs must correspond to the values of the ipFamilies field. Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "atomic" + }, + "externalIPs": { + "description": "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system.", + "type": "array", + "items": { + "type": "string" + } + }, + "externalName": { + "description": "externalName is the external reference that discovery mechanisms will return as an alias for this service (e.g. a DNS CNAME record). No proxying will be involved.
Must be a lowercase RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires `type` to be \"ExternalName\".", + "type": "string" + }, + "externalTrafficPolicy": { + "description": "externalTrafficPolicy describes how nodes distribute service traffic they receive on one of the Service's \"externally-facing\" addresses (NodePorts, ExternalIPs, and LoadBalancer IPs). If set to \"Local\", the proxy will configure the service in a way that assumes that external load balancers will take care of balancing the service traffic between nodes, and so each node will deliver traffic only to the node-local endpoints of the service, without masquerading the client source IP. (Traffic mistakenly sent to a node with no endpoints will be dropped.) The default value, \"Cluster\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features). Note that traffic sent to an External IP or LoadBalancer IP from within the cluster will always get \"Cluster\" semantics, but clients sending to a NodePort from within the cluster may need to take traffic policy into account when picking a node.", + "type": "string" + }, + "healthCheckNodePort": { + "description": "healthCheckNodePort specifies the healthcheck nodePort for the service. This only applies when type is set to LoadBalancer and externalTrafficPolicy is set to Local. If a value is specified, is in-range, and is not in use, it will be used. If not specified, a value will be automatically allocated. External systems (e.g. load-balancers) can use this port to determine if a given node holds endpoints for this service or not. If this field is specified when creating a Service which does not need it, creation will fail. This field will be wiped when updating a Service to no longer need it (e.g. changing type). This field cannot be updated once set.", + "type": "integer", + "format": "int32" + }, + "internalTrafficPolicy": { + "description": "InternalTrafficPolicy describes how nodes distribute service traffic they receive on the ClusterIP. If set to \"Local\", the proxy will assume that pods only want to talk to endpoints of the service on the same node as the pod, dropping the traffic if there are no local endpoints. The default value, \"Cluster\", uses the standard behavior of routing to all endpoints evenly (possibly modified by topology and other features).", + "type": "string" + }, + "ipFamilies": { + "description": "IPFamilies is a list of IP families (e.g. IPv4, IPv6) assigned to this service. This field is usually assigned automatically based on cluster configuration and the ipFamilyPolicy field. If this field is specified manually, the requested family is available in the cluster, and ipFamilyPolicy allows it, it will be used; otherwise creation of the service will fail. This field is conditionally mutable: it allows for adding or removing a secondary IP family, but it does not allow changing the primary IP family of the Service. Valid values are \"IPv4\" and \"IPv6\". This field only applies to Services of types ClusterIP, NodePort, and LoadBalancer, and does apply to \"headless\" services. This field will be wiped when updating a Service to type ExternalName.\n\nThis field may hold a maximum of two entries (dual-stack families, in either order). These families must correspond to the values of the clusterIPs field, if specified. 
Both clusterIPs and ipFamilies are governed by the ipFamilyPolicy field.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "atomic" + }, + "ipFamilyPolicy": { + "description": "IPFamilyPolicy represents the dual-stack-ness requested or required by this Service. If there is no value provided, then this field will be set to SingleStack. Services can be \"SingleStack\" (a single IP family), \"PreferDualStack\" (two IP families on dual-stack configured clusters or a single IP family on single-stack clusters), or \"RequireDualStack\" (two IP families on dual-stack configured clusters, otherwise fail). The ipFamilies and clusterIPs fields depend on the value of this field. This field will be wiped when updating a service to type ExternalName.", + "type": "string" + }, + "loadBalancerClass": { + "description": "loadBalancerClass is the class of the load balancer implementation this Service belongs to. If specified, the value of this field must be a label-style identifier, with an optional prefix, e.g. \"internal-vip\" or \"example.com/internal-vip\". Unprefixed names are reserved for end-users. This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load balancer implementation is used; today this is typically done through the cloud provider integration, but it should apply to any default implementation. If set, it is assumed that a load balancer implementation is watching for Services with a matching class. Any default load balancer implementation (e.g. cloud providers) should ignore Services that set this field. This field can only be set when creating or updating a Service to type 'LoadBalancer'. Once set, it cannot be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type.", + "type": "string" + }, + "loadBalancerIP": { + "description": "Only applies to Service Type: LoadBalancer. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature. Deprecated: This field was under-specified and its meaning varies across implementations. Using it is non-portable and it may not support dual-stack. Users are encouraged to use implementation-specific annotations when available.", + "type": "string" + }, + "loadBalancerSourceRanges": { + "description": "If specified and supported by the platform, traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature. More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/", + "type": "array", + "items": { + "type": "string" + } + }, + "ports": { + "description": "The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.ServicePort" + }, + "x-kubernetes-list-map-keys": [ + "port", + "protocol" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "port", + "x-kubernetes-patch-strategy": "merge" + }, + "publishNotReadyAddresses": { + "description": "publishNotReadyAddresses indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready.
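A minimal sketch of the dual-stack fields just described, assuming a dual-stack-capable cluster; the Service name and selector are hypothetical, and per the schema, ipFamilies must match clusterIPs when both are set:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: dual-stack-demo    # hypothetical
spec:
  ipFamilyPolicy: PreferDualStack   # SingleStack is the documented default when omitted
  ipFamilies:                       # at most two entries, in either order
    - IPv4
    - IPv6
  selector:
    app: demo              # hypothetical selector
  ports:
    - port: 8080
```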
The primary use case for setting this field is for a StatefulSet's Headless Service to propagate SRV DNS records for its Pods for the purpose of peer discovery. The Kubernetes controllers that generate Endpoints and EndpointSlice resources for Services interpret this to mean that all endpoints are considered \"ready\" even if the Pods themselves are not. Agents which consume only Kubernetes generated endpoints through the Endpoints or EndpointSlice resources can safely assume this behavior.", + "type": "boolean" + }, + "selector": { + "description": "Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/", + "type": "object", + "additionalProperties": { + "type": "string" + }, + "x-kubernetes-map-type": "atomic" + }, + "sessionAffinity": { + "description": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "type": "string" + }, + "sessionAffinityConfig": { + "description": "sessionAffinityConfig contains the configurations of session affinity.", + "$ref": "#/definitions/io.k8s.api.core.v1.SessionAffinityConfig" + }, + "type": { + "description": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object or EndpointSlice objects. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a virtual IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the same endpoints as the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the same endpoints as the clusterIP. \"ExternalName\" aliases this service to the specified externalName. Several other fields do not apply to ExternalName services. 
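Combining the ServiceSpec fields just described (`selector`, `sessionAffinity`, `sessionAffinityConfig`, `type`) with the externalTrafficPolicy field from earlier, a hedged sketch of a LoadBalancer Service that preserves client source IPs and pins clients to endpoints; all names are hypothetical:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: lb-demo                    # hypothetical
spec:
  type: LoadBalancer
  externalTrafficPolicy: Local     # deliver only to node-local endpoints; keeps client source IP
  sessionAffinity: ClientIP        # ClientIP or None (the default)
  sessionAffinityConfig:
    clientIP:
      timeoutSeconds: 10800        # sticky-session window
  selector:
    app: demo                      # hypothetical selector
  ports:
    - port: 443
      targetPort: 8443
```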
More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.ServiceStatus": { + "description": "ServiceStatus represents the current status of a service.", + "type": "object", + "properties": { + "conditions": { + "description": "Current service state", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition" + }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "loadBalancer": { + "description": "LoadBalancer contains the current status of the load-balancer, if one is present.", + "$ref": "#/definitions/io.k8s.api.core.v1.LoadBalancerStatus" + } + } + }, + "io.k8s.api.core.v1.SessionAffinityConfig": { + "description": "SessionAffinityConfig represents the configurations of session affinity.", + "type": "object", + "properties": { + "clientIP": { + "description": "clientIP contains the configurations of Client IP based session affinity.", + "$ref": "#/definitions/io.k8s.api.core.v1.ClientIPConfig" + } + } + }, + "io.k8s.api.core.v1.SleepAction": { + "description": "SleepAction describes a \"sleep\" action.", + "type": "object", + "required": [ + "seconds" + ], + "properties": { + "seconds": { + "description": "Seconds is the number of seconds to sleep.", + "type": "integer", + "format": "int64" + } + } + }, + "io.k8s.api.core.v1.StorageOSPersistentVolumeSource": { + "description": "Represents a StorageOS persistent volume resource.", + "type": "object", + "properties": { + "fsType": { + "description": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "type": "string" + }, + "readOnly": { + "description": "readOnly defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "type": "boolean" + }, + "secretRef": { + "description": "secretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted.", + "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference" + }, + "volumeName": { + "description": "volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.", + "type": "string" + }, + "volumeNamespace": { + "description": "volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.StorageOSVolumeSource": { + "description": "Represents a StorageOS persistent volume resource.", + "type": "object", + "properties": { + "fsType": { + "description": "fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "type": "string" + }, + "readOnly": { + "description": "readOnly defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "type": "boolean" + }, + "secretRef": { + "description": "secretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted.", + "$ref": "#/definitions/io.k8s.api.core.v1.LocalObjectReference" + }, + "volumeName": { + "description": "volumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.", + "type": "string" + }, + "volumeNamespace": { + "description": "volumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.Sysctl": { + "description": "Sysctl defines a kernel parameter to be set", + "type": "object", + "required": [ + "name", + "value" + ], + "properties": { + "name": { + "description": "Name of a property to set", + "type": "string" + }, + "value": { + "description": "Value of a property to set", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.TCPSocketAction": { + "description": "TCPSocketAction describes an action based on opening a socket", + "type": "object", + "required": [ + "port" + ], + "properties": { + "host": { + "description": "Optional: Host name to connect to, defaults to the pod IP.", + "type": "string" + }, + "port": { + "description": "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString" + } + } + }, + "io.k8s.api.core.v1.Taint": { + "description": "The node this Taint is attached to has the \"effect\" on any pod that does not tolerate the Taint.", + "type": "object", + "required": [ + "key", + "effect" + ], + "properties": { + "effect": { + "description": "Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute.", + "type": "string" + }, + "key": { + "description": "Required. The taint key to be applied to a node.", + "type": "string" + }, + "timeAdded": { + "description": "TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "value": { + "description": "The taint value corresponding to the taint key.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.Toleration": { + "description": "The pod this Toleration is attached to tolerates any taint that matches the triple \u003ckey,value,effect\u003e using the matching operator \u003coperator\u003e.", + "type": "object", + "properties": { + "effect": { + "description": "Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.", + "type": "string" + }, + "key": { + "description": "Key is the taint key that the toleration applies to. Empty means match all taint keys. 
If the key is empty, operator must be Exists; this combination means to match all values and all keys.", + "type": "string" + }, + "operator": { + "description": "Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.", + "type": "string" + }, + "tolerationSeconds": { + "description": "TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.", + "type": "integer", + "format": "int64" + }, + "value": { + "description": "Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.TopologySelectorLabelRequirement": { + "description": "A topology selector requirement is a selector that matches a given label. This is an alpha feature and may change in the future.", + "type": "object", + "required": [ + "key", + "values" + ], + "properties": { + "key": { + "description": "The label key that the selector applies to.", + "type": "string" + }, + "values": { + "description": "An array of string values. One value must match the label to be selected. Each entry in Values is ORed.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "io.k8s.api.core.v1.TopologySelectorTerm": { + "description": "A topology selector term represents the result of label queries. A null or empty topology selector term matches no objects. Their requirements are ANDed. It provides a subset of the functionality of NodeSelectorTerm. This is an alpha feature and may change in the future.", + "type": "object", + "properties": { + "matchLabelExpressions": { + "description": "A list of topology selector requirements by labels.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.TopologySelectorLabelRequirement" + } + } + }, + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.TopologySpreadConstraint": { + "description": "TopologySpreadConstraint specifies how to spread matching pods among the given topology.", + "type": "object", + "required": [ + "maxSkew", + "topologyKey", + "whenUnsatisfiable" + ], + "properties": { + "labelSelector": { + "description": "LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + }, + "matchLabelKeys": { + "description": "MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to look up values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot be set when LabelSelector isn't set. Keys that don't exist in the incoming pod labels will be ignored.
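Tying together the Toleration fields completed above (`key`, `operator`, `value`, `effect`, `tolerationSeconds`), a minimal sketch of a pod tolerating both a custom taint and a standard NoExecute taint; the pod name, image, and the `dedicated=gpu` taint are hypothetical:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: toleration-demo              # hypothetical
spec:
  containers:
    - name: app
      image: example.com/app:latest  # hypothetical
  tolerations:
    - key: dedicated                 # hypothetical taint key
      operator: Equal                # Equal is the default; Exists matches any value
      value: gpu
      effect: NoSchedule
    - key: node.kubernetes.io/not-ready
      operator: Exists
      effect: NoExecute
      tolerationSeconds: 300         # tolerate for 5 minutes after the taint appears, then evict
```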
A null or empty list means only match against labelSelector.\n\nThis is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "atomic" + }, + "maxSkew": { + "description": "MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. | zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It's a required field. Default value is 1 and 0 is not allowed.", + "type": "integer", + "format": "int32" + }, + "minDomains": { + "description": "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys is equal to or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule.\n\nFor example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, a new pod with the same labelSelector cannot be scheduled, because the computed skew will be 3 (3 - 0) if the new Pod is scheduled to any of the three zones, which would violate MaxSkew.\n\nThis is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default).", + "type": "integer", + "format": "int32" + }, + "nodeAffinityPolicy": { + "description": "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.\n\nIf this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.", + "type": "string" + }, + "nodeTaintsPolicy": { + "description": "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew.
Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included.\n\nIf this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.", + "type": "string" + }, + "topologyKey": { + "description": "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each \u003ckey, value\u003e as a \"bucket\", and try to put a balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is \"kubernetes.io/hostname\", each Node is a domain of that topology. And, if TopologyKey is \"topology.kubernetes.io/zone\", each zone is a domain of that topology. It's a required field.", + "type": "string" + }, + "whenUnsatisfiable": { + "description": "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location,\n but giving higher precedence to topologies that would help reduce the\n skew.\nA constraint is considered \"Unsatisfiable\" for an incoming pod if and only if every possible node assignment for that pod would violate \"MaxSkew\" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but the scheduler won't make it *more* imbalanced. It's a required field.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.TypedLocalObjectReference": { + "description": "TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace.", + "type": "object", + "required": [ + "kind", + "name" + ], + "properties": { + "apiGroup": { + "description": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", + "type": "string" + }, + "kind": { + "description": "Kind is the type of resource being referenced", + "type": "string" + }, + "name": { + "description": "Name is the name of resource being referenced", + "type": "string" + } + }, + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.TypedObjectReference": { + "type": "object", + "required": [ + "kind", + "name" + ], + "properties": { + "apiGroup": { + "description": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group.
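Mirroring the 3-zone `maxSkew` example in the descriptions above, a minimal sketch of a pod carrying the required TopologySpreadConstraint fields (`maxSkew`, `topologyKey`, `whenUnsatisfiable`); the pod name, image, and `app: demo` label are hypothetical:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: spread-demo                  # hypothetical
  labels:
    app: demo                        # hypothetical label, matched by the selector below
spec:
  containers:
    - name: app
      image: example.com/app:latest  # hypothetical
  topologySpreadConstraints:
    - maxSkew: 1                     # as in the 3-zone example: at most one pod of imbalance
      topologyKey: topology.kubernetes.io/zone
      whenUnsatisfiable: DoNotSchedule
      labelSelector:
        matchLabels:
          app: demo
```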
For any other third-party types, APIGroup is required.", + "type": "string" + }, + "kind": { + "description": "Kind is the type of resource being referenced", + "type": "string" + }, + "name": { + "description": "Name is the name of resource being referenced", + "type": "string" + }, + "namespace": { + "description": "Namespace is the namespace of the resource being referenced. Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.Volume": { + "description": "Volume represents a named volume in a pod that may be accessed by any container in the pod.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "awsElasticBlockStore": { + "description": "awsElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "$ref": "#/definitions/io.k8s.api.core.v1.AWSElasticBlockStoreVolumeSource" + }, + "azureDisk": { + "description": "azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + "$ref": "#/definitions/io.k8s.api.core.v1.AzureDiskVolumeSource" + }, + "azureFile": { + "description": "azureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "$ref": "#/definitions/io.k8s.api.core.v1.AzureFileVolumeSource" + }, + "cephfs": { + "description": "cephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + "$ref": "#/definitions/io.k8s.api.core.v1.CephFSVolumeSource" + }, + "cinder": { + "description": "cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "$ref": "#/definitions/io.k8s.api.core.v1.CinderVolumeSource" + }, + "configMap": { + "description": "configMap represents a configMap that should populate this volume", + "$ref": "#/definitions/io.k8s.api.core.v1.ConfigMapVolumeSource" + }, + "csi": { + "description": "csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).", + "$ref": "#/definitions/io.k8s.api.core.v1.CSIVolumeSource" + }, + "downwardAPI": { + "description": "downwardAPI represents downward API about the pod that should populate this volume", + "$ref": "#/definitions/io.k8s.api.core.v1.DownwardAPIVolumeSource" + }, + "emptyDir": { + "description": "emptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + "$ref": "#/definitions/io.k8s.api.core.v1.EmptyDirVolumeSource" + }, + "ephemeral": { + "description": "ephemeral represents a volume that is handled by a cluster storage driver.
The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.", + "$ref": "#/definitions/io.k8s.api.core.v1.EphemeralVolumeSource" + }, + "fc": { + "description": "fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + "$ref": "#/definitions/io.k8s.api.core.v1.FCVolumeSource" + }, + "flexVolume": { + "description": "flexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + "$ref": "#/definitions/io.k8s.api.core.v1.FlexVolumeSource" + }, + "flocker": { + "description": "flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running", + "$ref": "#/definitions/io.k8s.api.core.v1.FlockerVolumeSource" + }, + "gcePersistentDisk": { + "description": "gcePersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "$ref": "#/definitions/io.k8s.api.core.v1.GCEPersistentDiskVolumeSource" + }, + "gitRepo": { + "description": "gitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", + "$ref": "#/definitions/io.k8s.api.core.v1.GitRepoVolumeSource" + }, + "glusterfs": { + "description": "glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + "$ref": "#/definitions/io.k8s.api.core.v1.GlusterfsVolumeSource" + }, + "hostPath": { + "description": "hostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + "$ref": "#/definitions/io.k8s.api.core.v1.HostPathVolumeSource" + }, + "iscsi": { + "description": "iscsi represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md", + "$ref": "#/definitions/io.k8s.api.core.v1.ISCSIVolumeSource" + }, + "name": { + "description": "name of the volume. Must be a DNS_LABEL and unique within the pod. 
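The gitRepo description above recommends replacing the deprecated volume with an init container that clones into an emptyDir; a hedged sketch of that pattern, where the pod name, images, and repository URL are all hypothetical:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: git-clone-demo               # hypothetical
spec:
  initContainers:
    - name: clone
      image: alpine/git              # hypothetical image choice
      args: ["clone", "https://example.com/repo.git", "/repo"]  # hypothetical repo URL
      volumeMounts:
        - name: repo
          mountPath: /repo
  containers:
    - name: app
      image: example.com/app:latest  # hypothetical
      volumeMounts:
        - name: repo
          mountPath: /repo           # sees the cloned checkout
  volumes:
    - name: repo
      emptyDir: {}                   # shares the pod's lifetime, per the emptyDir description
```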
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "type": "string" + }, + "nfs": { + "description": "nfs represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "$ref": "#/definitions/io.k8s.api.core.v1.NFSVolumeSource" + }, + "persistentVolumeClaim": { + "description": "persistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimVolumeSource" + }, + "photonPersistentDisk": { + "description": "photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + "$ref": "#/definitions/io.k8s.api.core.v1.PhotonPersistentDiskVolumeSource" + }, + "portworxVolume": { + "description": "portworxVolume represents a portworx volume attached and mounted on kubelets host machine", + "$ref": "#/definitions/io.k8s.api.core.v1.PortworxVolumeSource" + }, + "projected": { + "description": "projected items for all in one resources secrets, configmaps, and downward API", + "$ref": "#/definitions/io.k8s.api.core.v1.ProjectedVolumeSource" + }, + "quobyte": { + "description": "quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + "$ref": "#/definitions/io.k8s.api.core.v1.QuobyteVolumeSource" + }, + "rbd": { + "description": "rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", + "$ref": "#/definitions/io.k8s.api.core.v1.RBDVolumeSource" + }, + "scaleIO": { + "description": "scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + "$ref": "#/definitions/io.k8s.api.core.v1.ScaleIOVolumeSource" + }, + "secret": { + "description": "secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretVolumeSource" + }, + "storageos": { + "description": "storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", + "$ref": "#/definitions/io.k8s.api.core.v1.StorageOSVolumeSource" + }, + "vsphereVolume": { + "description": "vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + "$ref": "#/definitions/io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource" + } + } + }, + "io.k8s.api.core.v1.VolumeDevice": { + "description": "volumeDevice describes a mapping of a raw block device within a container.", + "type": "object", + "required": [ + "name", + "devicePath" + ], + "properties": { + "devicePath": { + "description": "devicePath is the path inside of the container that the device will be mapped to.", + "type": "string" + }, + "name": { + "description": "name must match the name of a persistentVolumeClaim in the pod", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.VolumeMount": { + "description": "VolumeMount describes a mounting of a Volume within a container.", + "type": "object", + "required": [ + "name", + "mountPath" + ], + "properties": { + "mountPath": { + "description": "Path within the container at which the volume should be mounted. 
Must not contain ':'.", + "type": "string" + }, + "mountPropagation": { + "description": "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.", + "type": "string" + }, + "name": { + "description": "This must match the Name of a Volume.", + "type": "string" + }, + "readOnly": { + "description": "Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.", + "type": "boolean" + }, + "subPath": { + "description": "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).", + "type": "string" + }, + "subPathExpr": { + "description": "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive.", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.VolumeNodeAffinity": { + "description": "VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from.", + "type": "object", + "properties": { + "required": { + "description": "required specifies hard node constraints that must be met.", + "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelector" + } + } + }, + "io.k8s.api.core.v1.VolumeProjection": { + "description": "Projection that may be projected along with other supported volume types", + "type": "object", + "properties": { + "clusterTrustBundle": { + "description": "ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field of ClusterTrustBundle objects in an auto-updating file.\n\nAlpha, gated by the ClusterTrustBundleProjection feature gate.\n\nClusterTrustBundle objects can either be selected by name, or by the combination of signer name and a label selector.\n\nKubelet performs aggressive normalization of the PEM contents written into the pod filesystem. Esoteric PEM features such as inter-block comments and block headers are stripped. Certificates are deduplicated. The ordering of certificates within the file is arbitrary, and Kubelet may change the order over time.", + "$ref": "#/definitions/io.k8s.api.core.v1.ClusterTrustBundleProjection" }, "configMap": { - "description": "Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. 
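To illustrate the `subPathExpr` field just described, a minimal sketch that expands a downward-API environment variable into a per-pod subdirectory; the pod name, image, and host directory are hypothetical:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: subpath-demo                  # hypothetical
spec:
  containers:
    - name: app
      image: example.com/app:latest   # hypothetical
      env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
      volumeMounts:
        - name: workdir
          mountPath: /logs
          subPathExpr: $(POD_NAME)    # expanded using the container's environment
  volumes:
    - name: workdir
      hostPath:
        path: /var/log/pods-demo      # hypothetical host directory
```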
ConfigMap volumes support ownership management and SELinux relabeling.", + "description": "configMap information about the configMap data to project", + "$ref": "#/definitions/io.k8s.api.core.v1.ConfigMapProjection" + }, + "downwardAPI": { + "description": "downwardAPI information about the downwardAPI data to project", + "$ref": "#/definitions/io.k8s.api.core.v1.DownwardAPIProjection" + }, + "secret": { + "description": "secret information about the secret data to project", + "$ref": "#/definitions/io.k8s.api.core.v1.SecretProjection" + }, + "serviceAccountToken": { + "description": "serviceAccountToken is information about the serviceAccountToken data to project", + "$ref": "#/definitions/io.k8s.api.core.v1.ServiceAccountTokenProjection" + } + } + }, + "io.k8s.api.core.v1.VolumeResourceRequirements": { + "description": "VolumeResourceRequirements describes the storage resource requirements for a volume.", + "type": "object", + "properties": { + "limits": { + "description": "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + } + }, + "requests": { + "description": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + } + } + } + }, + "io.k8s.api.core.v1.VsphereVirtualDiskVolumeSource": { + "description": "Represents a vSphere volume resource.", + "type": "object", + "required": [ + "volumePath" + ], + "properties": { + "fsType": { + "description": "fsType is filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "type": "string" + }, + "storagePolicyID": { + "description": "storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.", + "type": "string" + }, + "storagePolicyName": { + "description": "storagePolicyName is the storage Policy Based Management (SPBM) profile name.", + "type": "string" + }, + "volumePath": { + "description": "volumePath is the path that identifies vSphere volume vmdk", + "type": "string" + } + } + }, + "io.k8s.api.core.v1.WeightedPodAffinityTerm": { + "description": "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)", + "type": "object", + "required": [ + "weight", + "podAffinityTerm" + ], + "properties": { + "podAffinityTerm": { + "description": "Required. 
A pod affinity term, associated with the corresponding weight.", + "$ref": "#/definitions/io.k8s.api.core.v1.PodAffinityTerm" + }, + "weight": { + "description": "weight associated with matching the corresponding podAffinityTerm, in the range 1-100.", + "type": "integer", + "format": "int32" + } + } + }, + "io.k8s.api.core.v1.WindowsSecurityContextOptions": { + "description": "WindowsSecurityContextOptions contain Windows-specific options and credentials.", + "type": "object", + "properties": { + "gmsaCredentialSpec": { + "description": "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.", + "type": "string" + }, + "gmsaCredentialSpecName": { + "description": "GMSACredentialSpecName is the name of the GMSA credential spec to use.", + "type": "string" + }, + "hostProcess": { + "description": "HostProcess determines if a container should be run as a 'Host Process' container. All of a Pod's containers must have the same effective HostProcess value (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). In addition, if HostProcess is true then HostNetwork must also be set to true.", + "type": "boolean" + }, + "runAsUserName": { + "description": "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "type": "string" + } + } + }, + "io.k8s.api.discovery.v1.Endpoint": { + "description": "Endpoint represents a single logical \"backend\" implementing a service.", + "type": "object", + "required": [ + "addresses" + ], + "properties": { + "addresses": { + "description": "addresses of this endpoint. The contents of this field are interpreted according to the corresponding EndpointSlice addressType field. Consumers must handle different types of addresses in the context of their own capabilities. This must contain at least one address but no more than 100. These are all assumed to be fungible and clients may choose to only use the first element. Refer to: https://issue.k8s.io/106267", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "set" + }, + "conditions": { + "description": "conditions contains information about the current status of the endpoint.", + "$ref": "#/definitions/io.k8s.api.discovery.v1.EndpointConditions" + }, + "deprecatedTopology": { + "description": "deprecatedTopology contains topology information part of the v1beta1 API. This field is deprecated, and will be removed when the v1beta1 API is removed (no sooner than kubernetes v1.24). While this field can hold values, it is not writable through the v1 API, and any attempts to write to it will be silently ignored. Topology information can be found in the zone and nodeName fields instead.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "hints": { + "description": "hints contains information associated with how an endpoint should be consumed.", + "$ref": "#/definitions/io.k8s.api.discovery.v1.EndpointHints" + }, + "hostname": { + "description": "hostname of this endpoint. This field may be used by consumers of endpoints to distinguish endpoints from each other (e.g. in DNS names). 
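As a sketch of the WeightedPodAffinityTerm shape above (both `weight` and `podAffinityTerm` are required), a preferred pod anti-affinity rule that spreads replicas across nodes; the pod name, image, and label are hypothetical:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: affinity-demo                # hypothetical
  labels:
    app: demo                        # hypothetical label
spec:
  containers:
    - name: app
      image: example.com/app:latest  # hypothetical
  affinity:
    podAntiAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
        - weight: 100                # 1-100; summed per node to find the most preferred node
          podAffinityTerm:           # required together with weight
            topologyKey: kubernetes.io/hostname
            labelSelector:
              matchLabels:
                app: demo
```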
Multiple endpoints which use the same hostname should be considered fungible (e.g. multiple A values in DNS). Must be lowercase and pass DNS Label (RFC 1123) validation.", + "type": "string" + }, + "nodeName": { + "description": "nodeName represents the name of the Node hosting this endpoint. This can be used to determine endpoints local to a Node.", + "type": "string" + }, + "targetRef": { + "description": "targetRef is a reference to a Kubernetes object that represents this endpoint.", + "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference" + }, + "zone": { + "description": "zone is the name of the Zone this endpoint exists in.", + "type": "string" + } + } + }, + "io.k8s.api.discovery.v1.EndpointConditions": { + "description": "EndpointConditions represents the current condition of an endpoint.", + "type": "object", + "properties": { + "ready": { + "description": "ready indicates that this endpoint is prepared to receive traffic, according to whatever system is managing the endpoint. A nil value indicates an unknown state. In most cases consumers should interpret this unknown state as ready. For compatibility reasons, ready should never be \"true\" for terminating endpoints, except when the normal readiness behavior is being explicitly overridden, for example when the associated Service has set the publishNotReadyAddresses flag.", + "type": "boolean" + }, + "serving": { + "description": "serving is identical to ready except that it is set regardless of the terminating state of endpoints. This condition should be set to true for a ready endpoint that is terminating. If nil, consumers should defer to the ready condition.", + "type": "boolean" + }, + "terminating": { + "description": "terminating indicates that this endpoint is terminating. A nil value indicates an unknown state. Consumers should interpret this unknown state to mean that the endpoint is not terminating.", + "type": "boolean" + } + } + }, + "io.k8s.api.discovery.v1.EndpointHints": { + "description": "EndpointHints provides hints describing how an endpoint should be consumed.", + "type": "object", + "properties": { + "forZones": { + "description": "forZones indicates the zone(s) this endpoint should be consumed by to enable topology aware routing.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.discovery.v1.ForZone" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "io.k8s.api.discovery.v1.EndpointPort": { + "description": "EndpointPort represents a Port used by an EndpointSlice", + "type": "object", + "properties": { + "appProtocol": { + "description": "The application protocol for this port. This is used as a hint for implementations to offer richer behavior for protocols that they understand. This field follows standard Kubernetes label syntax. 
Valid values are either:\n\n* Un-prefixed protocol names - reserved for IANA standard service names (as per RFC-6335 and https://www.iana.org/assignments/service-names).\n\n* Kubernetes-defined prefixed names:\n * 'kubernetes.io/h2c' - HTTP/2 prior knowledge over cleartext as described in https://www.rfc-editor.org/rfc/rfc9113.html#name-starting-http-2-with-prior-\n * 'kubernetes.io/ws' - WebSocket over cleartext as described in https://www.rfc-editor.org/rfc/rfc6455\n * 'kubernetes.io/wss' - WebSocket over TLS as described in https://www.rfc-editor.org/rfc/rfc6455\n\n* Other protocols should use implementation-defined prefixed names such as mycompany.com/my-custom-protocol.", + "type": "string" + }, + "name": { + "description": "name represents the name of this port. All ports in an EndpointSlice must have a unique name. If the EndpointSlice is derived from a Kubernetes service, this corresponds to the Service.ports[].name. Name must either be an empty string or pass DNS_LABEL validation: * must be no more than 63 characters long. * must consist of lower case alphanumeric characters or '-'. * must start and end with an alphanumeric character. Default is empty string.", + "type": "string" + }, + "port": { + "description": "port represents the port number of the endpoint. If this is not specified, ports are not restricted and must be interpreted in the context of the specific consumer.", + "type": "integer", + "format": "int32" + }, + "protocol": { + "description": "protocol represents the IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", + "type": "string" + } + }, + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.discovery.v1.EndpointSlice": { + "description": "EndpointSlice represents a subset of the endpoints that implement a service. For a given service there may be multiple EndpointSlice objects, selected by labels, which must be joined to produce the full set of endpoints.", + "type": "object", + "required": [ + "addressType", + "endpoints" + ], + "properties": { + "addressType": { + "description": "addressType specifies the type of address carried by this EndpointSlice. All addresses in this slice must be the same type. This field is immutable after creation. The following address types are currently supported: * IPv4: Represents an IPv4 Address. * IPv6: Represents an IPv6 Address. * FQDN: Represents a Fully Qualified Domain Name.", + "type": "string" + }, + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "endpoints": { + "description": "endpoints is a list of unique endpoints in this slice. Each slice may include a maximum of 1000 endpoints.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.discovery.v1.Endpoint" + }, + "x-kubernetes-list-type": "atomic" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "ports": { + "description": "ports specifies the list of network ports exposed by each endpoint in this slice. Each port must have a unique name. When ports is empty, it indicates that there are no defined ports. When a port is defined with a nil port value, it indicates \"all ports\". Each slice may include a maximum of 100 ports.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.discovery.v1.EndpointPort" + }, + "x-kubernetes-list-type": "atomic" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "discovery.k8s.io", + "kind": "EndpointSlice", + "version": "v1" + } + ] + }, + "io.k8s.api.discovery.v1.EndpointSliceList": { + "description": "EndpointSliceList represents a list of endpoint slices", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of endpoint slices", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.discovery.v1.EndpointSlice" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "discovery.k8s.io", + "kind": "EndpointSliceList", + "version": "v1" + } + ] + }, + "io.k8s.api.discovery.v1.ForZone": { + "description": "ForZone provides information about which zones should consume this endpoint.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "description": "name represents the name of the zone.", + "type": "string" + } + } + }, + "io.k8s.api.events.v1.Event": { + "description": "Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. Events have a limited retention time and triggers and messages may evolve with time. Event consumers should not rely on the timing of an event with a given Reason reflecting a consistent underlying trigger, or the continued existence of events with that Reason. Events should be treated as informative, best-effort, supplemental data.", + "type": "object", + "required": [ + "eventTime" + ], + "properties": { + "action": { + "description": "action is what action was taken/failed regarding to the regarding object. It is machine-readable. This field cannot be empty for new Events and it can have at most 128 characters.", + "type": "string" + }, + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. 
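Pulling together the EndpointSlice fields defined above (`addressType`, `endpoints`, `ports`), a minimal hand-written sketch; in practice the control plane generates these objects, and all names, IPs, and zones below are hypothetical:

```yaml
apiVersion: discovery.k8s.io/v1
kind: EndpointSlice
metadata:
  name: demo-abc12                       # hypothetical; normally generated
  labels:
    kubernetes.io/service-name: demo     # ties the slice to its Service
addressType: IPv4                        # immutable after creation
endpoints:
  - addresses:
      - 10.0.0.7                         # hypothetical pod IP
    conditions:
      ready: true
    nodeName: node-1                     # hypothetical
    zone: us-east-1a                     # hypothetical
ports:
  - name: http                           # must be unique within the slice
    port: 8080
    protocol: TCP
```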
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "deprecatedCount": { + "description": "deprecatedCount is the deprecated field assuring backward compatibility with core.v1 Event type.", + "type": "integer", + "format": "int32" + }, + "deprecatedFirstTimestamp": { + "description": "deprecatedFirstTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "deprecatedLastTimestamp": { + "description": "deprecatedLastTimestamp is the deprecated field assuring backward compatibility with core.v1 Event type.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "deprecatedSource": { + "description": "deprecatedSource is the deprecated field assuring backward compatibility with core.v1 Event type.", + "$ref": "#/definitions/io.k8s.api.core.v1.EventSource" + }, + "eventTime": { + "description": "eventTime is the time when this Event was first observed. It is required.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "note": { + "description": "note is a human-readable description of the status of this operation. Maximal length of the note is 1kB, but libraries should be prepared to handle values up to 64kB.", + "type": "string" + }, + "reason": { + "description": "reason is why the action was taken. It is human-readable. This field cannot be empty for new Events and it can have at most 128 characters.", + "type": "string" + }, + "regarding": { + "description": "regarding contains the object this Event is about. In most cases it's an Object reporting controller implements, e.g. ReplicaSetController implements ReplicaSets and this event is emitted because it acts on some changes in a ReplicaSet object.", + "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference" + }, + "related": { + "description": "related is the optional secondary object for more complex actions. E.g. when regarding object triggers a creation or deletion of related object.", + "$ref": "#/definitions/io.k8s.api.core.v1.ObjectReference" + }, + "reportingController": { + "description": "reportingController is the name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. This field cannot be empty for new Events.", + "type": "string" + }, + "reportingInstance": { + "description": "reportingInstance is the ID of the controller instance, e.g. `kubelet-xyzf`. 
This field cannot be empty for new Events and it can have at most 128 characters.", + "type": "string" + }, + "series": { + "description": "series is data about the Event series this event represents or nil if it's a singleton Event.", + "$ref": "#/definitions/io.k8s.api.events.v1.EventSeries" + }, + "type": { + "description": "type is the type of this event (Normal, Warning), new types could be added in the future. It is machine-readable. This field cannot be empty for new Events.", + "type": "string" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "events.k8s.io", + "kind": "Event", + "version": "v1" + } + ] + }, + "io.k8s.api.events.v1.EventList": { + "description": "EventList is a list of Event objects.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is a list of schema objects.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.events.v1.Event" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "events.k8s.io", + "kind": "EventList", + "version": "v1" + } + ] + }, + "io.k8s.api.events.v1.EventSeries": { + "description": "EventSeries contain information on series of events, i.e. thing that was/is happening continuously for some time. How often to update the EventSeries is up to the event reporters. The default event reporter in \"k8s.io/client-go/tools/events/event_broadcaster.go\" shows how this struct is updated on heartbeats and can guide customized reporter implementations.", + "type": "object", + "required": [ + "count", + "lastObservedTime" + ], + "properties": { + "count": { + "description": "count is the number of occurrences in this series up to the last heartbeat time.", + "type": "integer", + "format": "int32" + }, + "lastObservedTime": { + "description": "lastObservedTime is the time when last Event from the series was seen before last heartbeat.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime" + } + } + }, + "io.k8s.api.flowcontrol.v1.ExemptPriorityLevelConfiguration": { + "description": "ExemptPriorityLevelConfiguration describes the configurable aspects of the handling of exempt requests. In the mandatory exempt configuration object the values in the fields here can be modified by authorized users, unlike the rest of the `spec`.", + "type": "object", + "properties": { + "lendablePercent": { + "description": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. This value of this field must be between 0 and 100, inclusive, and it defaults to 0. 
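
Reading the Event definition above back as a manifest, a minimal events.k8s.io/v1 object could be sketched as follows; every name here is hypothetical, and only eventTime is required by the schema:

apiVersion: events.k8s.io/v1
kind: Event
metadata:
  name: example-pod.17a8b9c0d1e2f3a4            # hypothetical
  namespace: default
eventTime: "2024-01-01T12:00:00.000000Z"        # MicroTime; the one required field
type: Normal                                    # Normal or Warning
action: Created                                 # machine-readable, at most 128 characters
reason: SuccessfulCreate                        # at most 128 characters
note: "Created pod example-pod"                 # human-readable, up to 1kB
reportingController: example.com/controller     # hypothetical controller name
reportingInstance: controller-abc123            # hypothetical instance ID
regarding:                                      # the object this Event is about
  kind: Pod
  name: example-pod
  namespace: default
series:                                         # present only for repeating events
  count: 3
  lastObservedTime: "2024-01-01T12:05:00.000000Z"
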
The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )", + "type": "integer", + "format": "int32" + }, + "nominalConcurrencyShares": { + "description": "`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats nominally reserved for this priority level. This DOES NOT limit the dispatching from this priority level but affects the other priority levels through the borrowing mechanism. The server's concurrency limit (ServerCL) is divided among all the priority levels in proportion to their NCS values:\n\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\n\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of zero.", + "type": "integer", + "format": "int32" + } + } + }, + "io.k8s.api.flowcontrol.v1.FlowDistinguisherMethod": { + "description": "FlowDistinguisherMethod specifies the method of a flow distinguisher.", + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "description": "`type` is the type of flow distinguisher method. The supported types are \"ByUser\" and \"ByNamespace\". Required.", + "type": "string" + } + } + }, + "io.k8s.api.flowcontrol.v1.FlowSchema": { + "description": "FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with similar attributes and is identified by a pair of strings: the name of the FlowSchema and a \"flow distinguisher\".", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "`spec` is the specification of the desired behavior of a FlowSchema. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.FlowSchemaSpec" + }, + "status": { + "description": "`status` is the current status of a FlowSchema.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.FlowSchemaStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "FlowSchema", + "version": "v1" + } + ] + }, + "io.k8s.api.flowcontrol.v1.FlowSchemaCondition": { + "description": "FlowSchemaCondition describes conditions for a FlowSchema.", + "type": "object", + "properties": { + "lastTransitionTime": { + "description": "`lastTransitionTime` is the last time the condition transitioned from one status to another.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "message": { + "description": "`message` is a human-readable message indicating details about last transition.", + "type": "string" + }, + "reason": { + "description": "`reason` is a unique, one-word, CamelCase reason for the condition's last transition.", + "type": "string" + }, + "status": { + "description": "`status` is the status of the condition. Can be True, False, Unknown. Required.", + "type": "string" + }, + "type": { + "description": "`type` is the type of the condition. Required.", + "type": "string" + } + } + }, + "io.k8s.api.flowcontrol.v1.FlowSchemaList": { + "description": "FlowSchemaList is a list of FlowSchema objects.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "`items` is a list of FlowSchemas.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.FlowSchema" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "`metadata` is the standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "FlowSchemaList", + "version": "v1" + } + ] + }, + "io.k8s.api.flowcontrol.v1.FlowSchemaSpec": { + "description": "FlowSchemaSpec describes what the FlowSchema's specification looks like.", + "type": "object", + "required": [ + "priorityLevelConfiguration" + ], + "properties": { + "distinguisherMethod": { + "description": "`distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema. `nil` specifies that the distinguisher is disabled and thus will always be the empty string.", + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.FlowDistinguisherMethod" + }, + "matchingPrecedence": { + "description": "`matchingPrecedence` is used to choose among the FlowSchemas that match a given request. The chosen FlowSchema is among those with the numerically lowest (which we take to be logically highest) MatchingPrecedence.
Each MatchingPrecedence value must be in the range [1,10000]. Note that if the precedence is not specified, it will be set to 1000 as default.", + "type": "integer", + "format": "int32" + }, + "priorityLevelConfiguration": { + "description": "`priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot be resolved, the FlowSchema will be ignored and marked as invalid in its status. Required.", + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationReference" + }, + "rules": { + "description": "`rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if at least one member of rules matches the request. If it is an empty slice, there will be no requests matching the FlowSchema.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.PolicyRulesWithSubjects" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "io.k8s.api.flowcontrol.v1.FlowSchemaStatus": { + "description": "FlowSchemaStatus represents the current state of a FlowSchema.", + "type": "object", + "properties": { + "conditions": { + "description": "`conditions` is a list of the current states of FlowSchema.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.FlowSchemaCondition" + }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + } + } + }, + "io.k8s.api.flowcontrol.v1.GroupSubject": { + "description": "GroupSubject holds detailed information for group-kind subject.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "description": "name is the user group that matches, or \"*\" to match all user groups. See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some well-known group names. Required.", + "type": "string" + } + } + }, + "io.k8s.api.flowcontrol.v1.LimitResponse": { + "description": "LimitResponse defines how to handle requests that can not be executed right now.", + "type": "object", + "required": [ + "type" + ], + "properties": { + "queuing": { + "description": "`queuing` holds the configuration parameters for queuing. This field may be non-empty only if `type` is `\"Queue\"`.", + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.QueuingConfiguration" + }, + "type": { + "description": "`type` is \"Queue\" or \"Reject\". \"Queue\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \"Reject\" means that requests that can not be executed upon arrival are rejected. Required.", + "type": "string" + } + }, + "x-kubernetes-unions": [ + { + "discriminator": "type", + "fields-to-discriminateBy": { + "queuing": "Queuing" + } + } + ] + }, + "io.k8s.api.flowcontrol.v1.LimitedPriorityLevelConfiguration": { + "description": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?", + "type": "object", + "properties": { + "borrowingLimitPercent": { + "description": "`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels.
The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\n\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\n\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.", + "type": "integer", + "format": "int32" + }, + "lendablePercent": { + "description": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )", + "type": "integer", + "format": "int32" + }, + "limitResponse": { + "description": "`limitResponse` indicates what to do with requests that can not be executed right now", + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.LimitResponse" + }, + "nominalConcurrencyShares": { + "description": "`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats available at this priority level. This is used both for requests dispatched from this priority level as well as requests dispatched from other priority levels borrowing seats from this level. The server's concurrency limit (ServerCL) is divided among the Limited priority levels in proportion to their NCS values:\n\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\n\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level.\n\nIf not specified, this field defaults to a value of 30.\n\nSetting this field to zero supports the construction of a \"jail\" for this priority level that is used to hold some request(s)", + "type": "integer", + "format": "int32" + } + } + }, + "io.k8s.api.flowcontrol.v1.NonResourcePolicyRule": { + "description": "NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member of verbs matches the request and (b) at least one member of nonResourceURLs matches the request.", + "type": "object", + "required": [ + "verbs", + "nonResourceURLs" + ], + "properties": { + "nonResourceURLs": { + "description": "`nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. For example:\n - \"/healthz\" is legal\n - \"/hea*\" is illegal\n - \"/hea\" is legal but matches nothing\n - \"/hea/*\" also matches nothing\n - \"/healthz/*\" matches all per-component health checks.\n\"*\" matches all non-resource urls. if it is present, it must be the only entry. Required.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "set" + }, + "verbs": { + "description": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs. 
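
The three formulas above compose; as a worked example with assumed numbers (a server concurrency limit ServerCL = 200 shared by two Limited levels A and B), written in the spec's own notation:

NCS(A) = 30, NCS(B) = 70, so sum_ncs = 100
NominalCL(A) = ceil( 200 * 30 / 100 ) = 60
NominalCL(B) = ceil( 200 * 70 / 100 ) = 140
with lendablePercent(A) = 50:       LendableCL(A) = round( 60 * 50/100.0 ) = 30
with borrowingLimitPercent(B) = 25: BorrowingCL(B) = round( 140 * 25/100.0 ) = 35

So B may hold at most 35 borrowed seats at any one time, and in this two-level setup can actually obtain at most the 30 seats that A is willing to lend.
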
If it is present, it must be the only entry. Required.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "set" + } + } + }, + "io.k8s.api.flowcontrol.v1.PolicyRulesWithSubjects": { + "description": "PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member of resourceRules or nonResourceRules matches the request.", + "type": "object", + "required": [ + "subjects" + ], + "properties": { + "nonResourceRules": { + "description": "`nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb and the target non-resource URL.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.NonResourcePolicyRule" + }, + "x-kubernetes-list-type": "atomic" + }, + "resourceRules": { + "description": "`resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the target resource. At least one of `resourceRules` and `nonResourceRules` has to be non-empty.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.ResourcePolicyRule" + }, + "x-kubernetes-list-type": "atomic" + }, + "subjects": { + "description": "subjects is the list of normal user, serviceaccount, or group that this rule cares about. There must be at least one member in this slice. A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. Required.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.Subject" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "io.k8s.api.flowcontrol.v1.PriorityLevelConfiguration": { + "description": "PriorityLevelConfiguration represents the configuration of a priority level.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "`spec` is the specification of the desired behavior of a \"request-priority\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationSpec" + }, + "status": { + "description": "`status` is the current status of a \"request-priority\". 
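
Combining PolicyRulesWithSubjects with NonResourcePolicyRule, one rules entry that matches unauthenticated health-check probes might look like the following hypothetical FlowSchema fragment; the URL patterns and the system:unauthenticated group name are taken from the descriptions above:

rules:
  - subjects:                        # required; at least one entry
      - kind: Group
        group:
          name: system:unauthenticated
    nonResourceRules:
      - verbs: ["get"]               # "*" would have to be the only entry
        nonResourceURLs:
          - "/healthz"
          - "/healthz/*"             # matches all per-component health checks
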
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "PriorityLevelConfiguration", + "version": "v1" + } + ] + }, + "io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationCondition": { + "description": "PriorityLevelConfigurationCondition defines the condition of priority level.", + "type": "object", + "properties": { + "lastTransitionTime": { + "description": "`lastTransitionTime` is the last time the condition transitioned from one status to another.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "message": { + "description": "`message` is a human-readable message indicating details about last transition.", + "type": "string" + }, + "reason": { + "description": "`reason` is a unique, one-word, CamelCase reason for the condition's last transition.", + "type": "string" + }, + "status": { + "description": "`status` is the status of the condition. Can be True, False, Unknown. Required.", + "type": "string" + }, + "type": { + "description": "`type` is the type of the condition. Required.", + "type": "string" + } + } + }, + "io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationList": { + "description": "PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "`items` is a list of request-priorities.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.PriorityLevelConfiguration" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "`metadata` is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "PriorityLevelConfigurationList", + "version": "v1" + } + ] + }, + "io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationReference": { + "description": "PriorityLevelConfigurationReference contains information that points to the \"request-priority\" being used.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "description": "`name` is the name of the priority level configuration being referenced Required.", + "type": "string" + } + } + }, + "io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationSpec": { + "description": "PriorityLevelConfigurationSpec specifies the configuration of a priority level.", + "type": "object", + "required": [ + "type" + ], + "properties": { + "exempt": { + "description": "`exempt` specifies how requests are handled for an exempt priority level. This field MUST be empty if `type` is `\"Limited\"`. This field MAY be non-empty if `type` is `\"Exempt\"`. If empty and `type` is `\"Exempt\"` then the default values for `ExemptPriorityLevelConfiguration` apply.", + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.ExemptPriorityLevelConfiguration" + }, + "limited": { + "description": "`limited` specifies how requests are handled for a Limited priority level. This field must be non-empty if and only if `type` is `\"Limited\"`.", + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.LimitedPriorityLevelConfiguration" + }, + "type": { + "description": "`type` indicates whether this priority level is subject to limitation on request execution. A value of `\"Exempt\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\"Limited\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. Required.", + "type": "string" + } + }, + "x-kubernetes-unions": [ + { + "discriminator": "type", + "fields-to-discriminateBy": { + "exempt": "Exempt", + "limited": "Limited" + } + } + ] + }, + "io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationStatus": { + "description": "PriorityLevelConfigurationStatus represents the current state of a \"request-priority\".", + "type": "object", + "properties": { + "conditions": { + "description": "`conditions` is the current state of \"request-priority\".", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.PriorityLevelConfigurationCondition" + }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + } + } + }, + "io.k8s.api.flowcontrol.v1.QueuingConfiguration": { + "description": "QueuingConfiguration holds the configuration parameters for queuing", + "type": "object", + "properties": { + "handSize": { + "description": "`handSize` is a small positive number that configures the shuffle sharding of requests into queues. When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. 
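
Read together, PriorityLevelConfigurationSpec, LimitedPriorityLevelConfiguration, LimitResponse, and QueuingConfiguration nest as in this minimal sketch; the name is hypothetical and the numeric values are the defaults quoted in the descriptions:

apiVersion: flowcontrol.apiserver.k8s.io/v1
kind: PriorityLevelConfiguration
metadata:
  name: example-level              # hypothetical
spec:
  type: Limited                    # discriminator: "Exempt" or "Limited"
  limited:                         # non-empty iff type is "Limited"
    nominalConcurrencyShares: 30   # default 30
    lendablePercent: 0             # default 0
    limitResponse:
      type: Queue                  # discriminator: "Queue" or "Reject"
      queuing:                     # only when type is "Queue"
        queues: 64                 # default 64
        handSize: 8                # default 8; must not exceed queues
        queueLengthLimit: 50       # default 50
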
The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. This field has a default value of 8.", + "type": "integer", + "format": "int32" + }, + "queueLengthLimit": { + "description": "`queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. If not specified, it will be defaulted to 50.", + "type": "integer", + "format": "int32" + }, + "queues": { + "description": "`queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. Setting it to 1 effectively precludes shuffle sharding and thus makes the distinguisher method of associated flow schemas irrelevant. This field has a default value of 64.", + "type": "integer", + "format": "int32" + } + } + }, + "io.k8s.api.flowcontrol.v1.ResourcePolicyRule": { + "description": "ResourcePolicyRule is a predicate that matches some resource requests, testing the request's verb and the target resource. A ResourcePolicyRule matches a resource request if and only if: (a) at least one member of verbs matches the request, (b) at least one member of apiGroups matches the request, (c) at least one member of resources matches the request, and (d) either (d1) the request does not specify a namespace (i.e., `Namespace==\"\"`) and clusterScope is true or (d2) the request specifies a namespace and at least one member of namespaces matches the request's namespace.", + "type": "object", + "required": [ + "verbs", + "apiGroups", + "resources" + ], + "properties": { + "apiGroups": { + "description": "`apiGroups` is a list of matching API groups and may not be empty. \"*\" matches all API groups and, if present, must be the only entry. Required.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "set" + }, + "clusterScope": { + "description": "`clusterScope` indicates whether to match requests that do not specify a namespace (which happens either because the resource is not namespaced or the request targets all namespaces). If this field is omitted or false then the `namespaces` field must contain a non-empty list.", + "type": "boolean" + }, + "namespaces": { + "description": "`namespaces` is a list of target namespaces that restricts matches. A request that specifies a target namespace matches only if either (a) this list contains that target namespace or (b) this list contains \"*\". Note that \"*\" matches any specified namespace but does not match a request that _does not specify_ a namespace (see the `clusterScope` field for that). This list may be empty, but only if `clusterScope` is true.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "set" + }, + "resources": { + "description": "`resources` is a list of matching resources (i.e., lowercase and plural) with, if desired, subresource. For example, [ \"services\", \"nodes/status\" ]. This list may not be empty. \"*\" matches all resources and, if present, must be the only entry. Required.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "set" + }, + "verbs": { + "description": "`verbs` is a list of matching verbs and may not be empty.
\"*\" matches all verbs and, if present, must be the only entry. Required.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "set" + } + } + }, + "io.k8s.api.flowcontrol.v1.ServiceAccountSubject": { + "description": "ServiceAccountSubject holds detailed information for service-account-kind subject.", + "type": "object", + "required": [ + "namespace", + "name" + ], + "properties": { + "name": { + "description": "`name` is the name of matching ServiceAccount objects, or \"*\" to match regardless of name. Required.", + "type": "string" + }, + "namespace": { + "description": "`namespace` is the namespace of matching ServiceAccount objects. Required.", + "type": "string" + } + } + }, + "io.k8s.api.flowcontrol.v1.Subject": { + "description": "Subject matches the originator of a request, as identified by the request authentication system. There are three ways of matching an originator; by user, group, or service account.", + "type": "object", + "required": [ + "kind" + ], + "properties": { + "group": { + "description": "`group` matches based on user group name.", + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.GroupSubject" + }, + "kind": { + "description": "`kind` indicates which one of the other fields is non-empty. Required", + "type": "string" + }, + "serviceAccount": { + "description": "`serviceAccount` matches ServiceAccounts.", + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.ServiceAccountSubject" + }, + "user": { + "description": "`user` matches based on username.", + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1.UserSubject" + } + }, + "x-kubernetes-unions": [ + { + "discriminator": "kind", + "fields-to-discriminateBy": { + "group": "Group", + "serviceAccount": "ServiceAccount", + "user": "User" + } + } + ] + }, + "io.k8s.api.flowcontrol.v1.UserSubject": { + "description": "UserSubject holds detailed information for user-kind subject.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "description": "`name` is the username that matches, or \"*\" to match all usernames. Required.", + "type": "string" + } + } + }, + "io.k8s.api.flowcontrol.v1beta3.ExemptPriorityLevelConfiguration": { + "description": "ExemptPriorityLevelConfiguration describes the configurable aspects of the handling of exempt requests. In the mandatory exempt configuration object the values in the fields here can be modified by authorized users, unlike the rest of the `spec`.", + "type": "object", + "properties": { + "lendablePercent": { + "description": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. This value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )", + "type": "integer", + "format": "int32" + }, + "nominalConcurrencyShares": { + "description": "`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats nominally reserved for this priority level. This DOES NOT limit the dispatching from this priority level but affects the other priority levels through the borrowing mechanism. 
The server's concurrency limit (ServerCL) is divided among all the priority levels in proportion to their NCS values:\n\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\n\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of zero.", + "type": "integer", + "format": "int32" + } + } + }, + "io.k8s.api.flowcontrol.v1beta3.FlowDistinguisherMethod": { + "description": "FlowDistinguisherMethod specifies the method of a flow distinguisher.", + "type": "object", + "required": [ + "type" + ], + "properties": { + "type": { + "description": "`type` is the type of flow distinguisher method The supported types are \"ByUser\" and \"ByNamespace\". Required.", + "type": "string" + } + } + }, + "io.k8s.api.flowcontrol.v1beta3.FlowSchema": { + "description": "FlowSchema defines the schema of a group of flows. Note that a flow is made up of a set of inbound API requests with similar attributes and is identified by a pair of strings: the name of the FlowSchema and a \"flow distinguisher\".", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "`spec` is the specification of the desired behavior of a FlowSchema. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.FlowSchemaSpec" + }, + "status": { + "description": "`status` is the current status of a FlowSchema. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.FlowSchemaStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "FlowSchema", + "version": "v1beta3" + } + ] + }, + "io.k8s.api.flowcontrol.v1beta3.FlowSchemaCondition": { + "description": "FlowSchemaCondition describes conditions for a FlowSchema.", + "type": "object", + "properties": { + "lastTransitionTime": { + "description": "`lastTransitionTime` is the last time the condition transitioned from one status to another.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "message": { + "description": "`message` is a human-readable message indicating details about last transition.", + "type": "string" + }, + "reason": { + "description": "`reason` is a unique, one-word, CamelCase reason for the condition's last transition.", + "type": "string" + }, + "status": { + "description": "`status` is the status of the condition. Can be True, False, Unknown. Required.", + "type": "string" + }, + "type": { + "description": "`type` is the type of the condition. Required.", + "type": "string" + } + } + }, + "io.k8s.api.flowcontrol.v1beta3.FlowSchemaList": { + "description": "FlowSchemaList is a list of FlowSchema objects.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "`items` is a list of FlowSchemas.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.FlowSchema" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "`metadata` is the standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "FlowSchemaList", + "version": "v1beta3" + } + ] + }, + "io.k8s.api.flowcontrol.v1beta3.FlowSchemaSpec": { + "description": "FlowSchemaSpec describes how the FlowSchema's specification looks like.", + "type": "object", + "required": [ + "priorityLevelConfiguration" + ], + "properties": { + "distinguisherMethod": { + "description": "`distinguisherMethod` defines how to compute the flow distinguisher for requests that match this schema. `nil` specifies that the distinguisher is disabled and thus will always be the empty string.", + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.FlowDistinguisherMethod" + }, + "matchingPrecedence": { + "description": "`matchingPrecedence` is used to choose among the FlowSchemas that match a given request. 
The chosen FlowSchema is among those with the numerically lowest (which we take to be logically highest) MatchingPrecedence. Each MatchingPrecedence value must be ranged in [1,10000]. Note that if the precedence is not specified, it will be set to 1000 as default.", + "type": "integer", + "format": "int32" + }, + "priorityLevelConfiguration": { + "description": "`priorityLevelConfiguration` should reference a PriorityLevelConfiguration in the cluster. If the reference cannot be resolved, the FlowSchema will be ignored and marked as invalid in its status. Required.", + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationReference" + }, + "rules": { + "description": "`rules` describes which requests will match this flow schema. This FlowSchema matches a request if and only if at least one member of rules matches the request. if it is an empty slice, there will be no requests matching the FlowSchema.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.PolicyRulesWithSubjects" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "io.k8s.api.flowcontrol.v1beta3.FlowSchemaStatus": { + "description": "FlowSchemaStatus represents the current state of a FlowSchema.", + "type": "object", + "properties": { + "conditions": { + "description": "`conditions` is a list of the current states of FlowSchema.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.FlowSchemaCondition" + }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + } + } + }, + "io.k8s.api.flowcontrol.v1beta3.GroupSubject": { + "description": "GroupSubject holds detailed information for group-kind subject.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "description": "name is the user group that matches, or \"*\" to match all user groups. See https://github.com/kubernetes/apiserver/blob/master/pkg/authentication/user/user.go for some well-known group names. Required.", + "type": "string" + } + } + }, + "io.k8s.api.flowcontrol.v1beta3.LimitResponse": { + "description": "LimitResponse defines how to handle requests that can not be executed right now.", + "type": "object", + "required": [ + "type" + ], + "properties": { + "queuing": { + "description": "`queuing` holds the configuration parameters for queuing. This field may be non-empty only if `type` is `\"Queue\"`.", + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.QueuingConfiguration" + }, + "type": { + "description": "`type` is \"Queue\" or \"Reject\". \"Queue\" means that requests that can not be executed upon arrival are held in a queue until they can be executed or a queuing limit is reached. \"Reject\" means that requests that can not be executed upon arrival are rejected. Required.", + "type": "string" + } + }, + "x-kubernetes-unions": [ + { + "discriminator": "type", + "fields-to-discriminateBy": { + "queuing": "Queuing" + } + } + ] + }, + "io.k8s.api.flowcontrol.v1beta3.LimitedPriorityLevelConfiguration": { + "description": "LimitedPriorityLevelConfiguration specifies how to handle requests that are subject to limits. 
It addresses two issues:\n - How are requests for this priority level limited?\n - What should be done with requests that exceed the limit?", + "type": "object", + "properties": { + "borrowingLimitPercent": { + "description": "`borrowingLimitPercent`, if present, configures a limit on how many seats this priority level can borrow from other priority levels. The limit is known as this level's BorrowingConcurrencyLimit (BorrowingCL) and is a limit on the total number of seats that this level may borrow at any one time. This field holds the ratio of that limit to the level's nominal concurrency limit. When this field is non-nil, it must hold a non-negative integer and the limit is calculated as follows.\n\nBorrowingCL(i) = round( NominalCL(i) * borrowingLimitPercent(i)/100.0 )\n\nThe value of this field can be more than 100, implying that this priority level can borrow a number of seats that is greater than its own nominal concurrency limit (NominalCL). When this field is left `nil`, the limit is effectively infinite.", + "type": "integer", + "format": "int32" + }, + "lendablePercent": { + "description": "`lendablePercent` prescribes the fraction of the level's NominalCL that can be borrowed by other priority levels. The value of this field must be between 0 and 100, inclusive, and it defaults to 0. The number of seats that other levels can borrow from this level, known as this level's LendableConcurrencyLimit (LendableCL), is defined as follows.\n\nLendableCL(i) = round( NominalCL(i) * lendablePercent(i)/100.0 )", + "type": "integer", + "format": "int32" + }, + "limitResponse": { + "description": "`limitResponse` indicates what to do with requests that can not be executed right now", + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.LimitResponse" + }, + "nominalConcurrencyShares": { + "description": "`nominalConcurrencyShares` (NCS) contributes to the computation of the NominalConcurrencyLimit (NominalCL) of this level. This is the number of execution seats available at this priority level. This is used both for requests dispatched from this priority level as well as requests dispatched from other priority levels borrowing seats from this level. The server's concurrency limit (ServerCL) is divided among the Limited priority levels in proportion to their NCS values:\n\nNominalCL(i) = ceil( ServerCL * NCS(i) / sum_ncs ) sum_ncs = sum[priority level k] NCS(k)\n\nBigger numbers mean a larger nominal concurrency limit, at the expense of every other priority level. This field has a default value of 30.", + "type": "integer", + "format": "int32" + } + } + }, + "io.k8s.api.flowcontrol.v1beta3.NonResourcePolicyRule": { + "description": "NonResourcePolicyRule is a predicate that matches non-resource requests according to their verb and the target non-resource URL. A NonResourcePolicyRule matches a request if and only if both (a) at least one member of verbs matches the request and (b) at least one member of nonResourceURLs matches the request.", + "type": "object", + "required": [ + "verbs", + "nonResourceURLs" + ], + "properties": { + "nonResourceURLs": { + "description": "`nonResourceURLs` is a set of url prefixes that a user should have access to and may not be empty. For example:\n - \"/healthz\" is legal\n - \"/hea*\" is illegal\n - \"/hea\" is legal but matches nothing\n - \"/hea/*\" also matches nothing\n - \"/healthz/*\" matches all per-component health checks.\n\"*\" matches all non-resource urls. if it is present, it must be the only entry. 
Required.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "set" + }, + "verbs": { + "description": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs. If it is present, it must be the only entry. Required.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "set" + } + } + }, + "io.k8s.api.flowcontrol.v1beta3.PolicyRulesWithSubjects": { + "description": "PolicyRulesWithSubjects prescribes a test that applies to a request to an apiserver. The test considers the subject making the request, the verb being requested, and the resource to be acted upon. This PolicyRulesWithSubjects matches a request if and only if both (a) at least one member of subjects matches the request and (b) at least one member of resourceRules or nonResourceRules matches the request.", + "type": "object", + "required": [ + "subjects" + ], + "properties": { + "nonResourceRules": { + "description": "`nonResourceRules` is a list of NonResourcePolicyRules that identify matching requests according to their verb and the target non-resource URL.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.NonResourcePolicyRule" + }, + "x-kubernetes-list-type": "atomic" + }, + "resourceRules": { + "description": "`resourceRules` is a slice of ResourcePolicyRules that identify matching requests according to their verb and the target resource. At least one of `resourceRules` and `nonResourceRules` has to be non-empty.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.ResourcePolicyRule" + }, + "x-kubernetes-list-type": "atomic" + }, + "subjects": { + "description": "subjects is the list of normal user, serviceaccount, or group that this rule cares about. There must be at least one member in this slice. A slice that includes both the system:authenticated and system:unauthenticated user groups matches every request. Required.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.Subject" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfiguration": { + "description": "PriorityLevelConfiguration represents the configuration of a priority level.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "`metadata` is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "`spec` is the specification of the desired behavior of a \"request-priority\". 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationSpec" + }, + "status": { + "description": "`status` is the current status of a \"request-priority\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "PriorityLevelConfiguration", + "version": "v1beta3" + } + ] + }, + "io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationCondition": { + "description": "PriorityLevelConfigurationCondition defines the condition of priority level.", + "type": "object", + "properties": { + "lastTransitionTime": { + "description": "`lastTransitionTime` is the last time the condition transitioned from one status to another.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "message": { + "description": "`message` is a human-readable message indicating details about last transition.", + "type": "string" + }, + "reason": { + "description": "`reason` is a unique, one-word, CamelCase reason for the condition's last transition.", + "type": "string" + }, + "status": { + "description": "`status` is the status of the condition. Can be True, False, Unknown. Required.", + "type": "string" + }, + "type": { + "description": "`type` is the type of the condition. Required.", + "type": "string" + } + } + }, + "io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationList": { + "description": "PriorityLevelConfigurationList is a list of PriorityLevelConfiguration objects.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "`items` is a list of request-priorities.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfiguration" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "`metadata` is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "PriorityLevelConfigurationList", + "version": "v1beta3" + } + ] + }, + "io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationReference": { + "description": "PriorityLevelConfigurationReference contains information that points to the \"request-priority\" being used.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "description": "`name` is the name of the priority level configuration being referenced Required.", + "type": "string" + } + } + }, + "io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationSpec": { + "description": "PriorityLevelConfigurationSpec specifies the configuration of a priority level.", + "type": "object", + "required": [ + "type" + ], + "properties": { + "exempt": { + "description": "`exempt` specifies how requests are handled for an exempt priority level. This field MUST be empty if `type` is `\"Limited\"`. This field MAY be non-empty if `type` is `\"Exempt\"`. If empty and `type` is `\"Exempt\"` then the default values for `ExemptPriorityLevelConfiguration` apply.", + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.ExemptPriorityLevelConfiguration" + }, + "limited": { + "description": "`limited` specifies how requests are handled for a Limited priority level. This field must be non-empty if and only if `type` is `\"Limited\"`.", + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.LimitedPriorityLevelConfiguration" + }, + "type": { + "description": "`type` indicates whether this priority level is subject to limitation on request execution. A value of `\"Exempt\"` means that requests of this priority level are not subject to a limit (and thus are never queued) and do not detract from the capacity made available to other priority levels. A value of `\"Limited\"` means that (a) requests of this priority level _are_ subject to limits and (b) some of the server's limited capacity is made available exclusively to this priority level. Required.", + "type": "string" + } + }, + "x-kubernetes-unions": [ + { + "discriminator": "type", + "fields-to-discriminateBy": { + "exempt": "Exempt", + "limited": "Limited" + } + } + ] + }, + "io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationStatus": { + "description": "PriorityLevelConfigurationStatus represents the current state of a \"request-priority\".", + "type": "object", + "properties": { + "conditions": { + "description": "`conditions` is the current state of \"request-priority\".", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.PriorityLevelConfigurationCondition" + }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + } + } + }, + "io.k8s.api.flowcontrol.v1beta3.QueuingConfiguration": { + "description": "QueuingConfiguration holds the configuration parameters for queuing", + "type": "object", + "properties": { + "handSize": { + "description": "`handSize` is a small positive number that configures the shuffle sharding of requests into queues. 
When enqueuing a request at this priority level the request's flow identifier (a string pair) is hashed and the hash value is used to shuffle the list of queues and deal a hand of the size specified here. The request is put into one of the shortest queues in that hand. `handSize` must be no larger than `queues`, and should be significantly smaller (so that a few heavy flows do not saturate most of the queues). See the user-facing documentation for more extensive guidance on setting this field. This field has a default value of 8.", + "type": "integer", + "format": "int32" + }, + "queueLengthLimit": { + "description": "`queueLengthLimit` is the maximum number of requests allowed to be waiting in a given queue of this priority level at a time; excess requests are rejected. This value must be positive. If not specified, it will be defaulted to 50.", + "type": "integer", + "format": "int32" + }, + "queues": { + "description": "`queues` is the number of queues for this priority level. The queues exist independently at each apiserver. The value must be positive. Setting it to 1 effectively precludes shuffle sharding and thus makes the distinguisher method of associated flow schemas irrelevant. This field has a default value of 64.", + "type": "integer", + "format": "int32" + } + } + }, + "io.k8s.api.flowcontrol.v1beta3.ResourcePolicyRule": { + "description": "ResourcePolicyRule is a predicate that matches some resource requests, testing the request's verb and the target resource. A ResourcePolicyRule matches a resource request if and only if: (a) at least one member of verbs matches the request, (b) at least one member of apiGroups matches the request, (c) at least one member of resources matches the request, and (d) either (d1) the request does not specify a namespace (i.e., `Namespace==\"\"`) and clusterScope is true or (d2) the request specifies a namespace and at least one member of namespaces matches the request's namespace.", + "type": "object", + "required": [ + "verbs", + "apiGroups", + "resources" + ], + "properties": { + "apiGroups": { + "description": "`apiGroups` is a list of matching API groups and may not be empty. \"*\" matches all API groups and, if present, must be the only entry. Required.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "set" + }, + "clusterScope": { + "description": "`clusterScope` indicates whether to match requests that do not specify a namespace (which happens either because the resource is not namespaced or the request targets all namespaces). If this field is omitted or false then the `namespaces` field must contain a non-empty list.", + "type": "boolean" + }, + "namespaces": { + "description": "`namespaces` is a list of target namespaces that restricts matches. A request that specifies a target namespace matches only if either (a) this list contains that target namespace or (b) this list contains \"*\". Note that \"*\" matches any specified namespace but does not match a request that _does not specify_ a namespace (see the `clusterScope` field for that). This list may be empty, but only if `clusterScope` is true.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "set" + }, + "resources": { + "description": "`resources` is a list of matching resources (i.e., lowercase and plural) with, if desired, subresource. For example, [ \"services\", \"nodes/status\" ]. This list may not be empty. \"*\" matches all resources and, if present, must be the only entry.
Required.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "set" + }, + "verbs": { + "description": "`verbs` is a list of matching verbs and may not be empty. \"*\" matches all verbs and, if present, must be the only entry. Required.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "set" + } + } + }, + "io.k8s.api.flowcontrol.v1beta3.ServiceAccountSubject": { + "description": "ServiceAccountSubject holds detailed information for service-account-kind subject.", + "type": "object", + "required": [ + "namespace", + "name" + ], + "properties": { + "name": { + "description": "`name` is the name of matching ServiceAccount objects, or \"*\" to match regardless of name. Required.", + "type": "string" + }, + "namespace": { + "description": "`namespace` is the namespace of matching ServiceAccount objects. Required.", + "type": "string" + } + } + }, + "io.k8s.api.flowcontrol.v1beta3.Subject": { + "description": "Subject matches the originator of a request, as identified by the request authentication system. There are three ways of matching an originator; by user, group, or service account.", + "type": "object", + "required": [ + "kind" + ], + "properties": { + "group": { + "description": "`group` matches based on user group name.", + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.GroupSubject" + }, + "kind": { + "description": "`kind` indicates which one of the other fields is non-empty. Required", + "type": "string" + }, + "serviceAccount": { + "description": "`serviceAccount` matches ServiceAccounts.", + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.ServiceAccountSubject" + }, + "user": { + "description": "`user` matches based on username.", + "$ref": "#/definitions/io.k8s.api.flowcontrol.v1beta3.UserSubject" + } + }, + "x-kubernetes-unions": [ + { + "discriminator": "kind", + "fields-to-discriminateBy": { + "group": "Group", + "serviceAccount": "ServiceAccount", + "user": "User" + } + } + ] + }, + "io.k8s.api.flowcontrol.v1beta3.UserSubject": { + "description": "UserSubject holds detailed information for user-kind subject.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "description": "`name` is the username that matches, or \"*\" to match all usernames. Required.", + "type": "string" + } + } + }, + "io.k8s.api.networking.v1.HTTPIngressPath": { + "description": "HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.", + "type": "object", + "required": [ + "pathType", + "backend" + ], + "properties": { + "backend": { + "description": "backend defines the referenced service endpoint to which the traffic will be forwarded to.", + "$ref": "#/definitions/io.k8s.api.networking.v1.IngressBackend" + }, + "path": { + "description": "path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/' and must be present when using PathType with value \"Exact\" or \"Prefix\".", + "type": "string" + }, + "pathType": { + "description": "pathType determines the interpretation of the path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers is the\n list of labels in the path split by the '/' separator. 
A request is a\n match for path p if p is an element-wise prefix of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types.", + "type": "string" + } + } + }, + "io.k8s.api.networking.v1.HTTPIngressRuleValue": { + "description": "HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http://\u003chost\u003e/\u003cpath\u003e?\u003csearchpart\u003e -\u003e backend where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.", + "type": "object", + "required": [ + "paths" + ], + "properties": { + "paths": { + "description": "paths is a collection of paths that map requests to backends.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.networking.v1.HTTPIngressPath" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "io.k8s.api.networking.v1.IPBlock": { + "description": "IPBlock describes a particular CIDR (Ex. \"192.168.1.0/24\",\"2001:db8::/64\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.", + "type": "object", + "required": [ + "cidr" + ], + "properties": { + "cidr": { + "description": "cidr is a string representing the IPBlock. Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\"", + "type": "string" + }, + "except": { + "description": "except is a slice of CIDRs that should not be included within an IPBlock. Valid examples are \"192.168.1.0/24\" or \"2001:db8::/64\". Except values will be rejected if they are outside the cidr range", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "io.k8s.api.networking.v1.Ingress": { + "description": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "spec is the desired state of the Ingress.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.networking.v1.IngressSpec" + }, + "status": { + "description": "status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.networking.v1.IngressStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "networking.k8s.io", + "kind": "Ingress", + "version": "v1" + } + ] + }, + "io.k8s.api.networking.v1.IngressBackend": { + "description": "IngressBackend describes all endpoints for a given service and port.", + "type": "object", + "properties": { + "resource": { + "description": "resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, a service.Name and service.Port must not be specified. This is a mutually exclusive setting with \"Service\".", + "$ref": "#/definitions/io.k8s.api.core.v1.TypedLocalObjectReference" + }, + "service": { + "description": "service references a service as a backend. This is a mutually exclusive setting with \"Resource\".", + "$ref": "#/definitions/io.k8s.api.networking.v1.IngressServiceBackend" + } + } + }, + "io.k8s.api.networking.v1.IngressClass": { + "description": "IngressClass represents the class of the Ingress, referenced by the Ingress Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be used to indicate that an IngressClass should be considered default. When a single IngressClass resource has this annotation set to true, new Ingress resources without a class specified will be assigned this default class.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "spec is the desired state of the IngressClass. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.networking.v1.IngressClassSpec" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "networking.k8s.io", + "kind": "IngressClass", + "version": "v1" + } + ] + }, + "io.k8s.api.networking.v1.IngressClassList": { + "description": "IngressClassList is a collection of IngressClasses.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of IngressClasses.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.networking.v1.IngressClass" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "networking.k8s.io", + "kind": "IngressClassList", + "version": "v1" + } + ] + }, + "io.k8s.api.networking.v1.IngressClassParametersReference": { + "description": "IngressClassParametersReference identifies an API object. This can be used to specify a cluster or namespace-scoped resource.", + "type": "object", + "required": [ + "kind", + "name" + ], + "properties": { + "apiGroup": { + "description": "apiGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", + "type": "string" + }, + "kind": { + "description": "kind is the type of resource being referenced.", + "type": "string" + }, + "name": { + "description": "name is the name of the resource being referenced.", + "type": "string" + }, + "namespace": { + "description": "namespace is the namespace of the resource being referenced. This field is required when scope is set to \"Namespace\" and must be unset when scope is set to \"Cluster\".", + "type": "string" + }, + "scope": { + "description": "scope represents if this refers to a cluster or namespace scoped resource. This may be set to \"Cluster\" (default) or \"Namespace\".", + "type": "string" + } + } + }, + "io.k8s.api.networking.v1.IngressClassSpec": { + "description": "IngressClassSpec provides information about the class of an Ingress.", + "type": "object", + "properties": { + "controller": { + "description": "controller refers to the name of the controller that should handle this class. This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable.", + "type": "string" + }, + "parameters": { + "description": "parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters.", + "$ref": "#/definitions/io.k8s.api.networking.v1.IngressClassParametersReference" + } + } + }, + "io.k8s.api.networking.v1.IngressList": { + "description": "IngressList is a collection of Ingress.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of Ingress.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.networking.v1.Ingress" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "networking.k8s.io", + "kind": "IngressList", + "version": "v1" + } + ] + }, + "io.k8s.api.networking.v1.IngressLoadBalancerIngress": { + "description": "IngressLoadBalancerIngress represents the status of a load-balancer ingress point.", + "type": "object", + "properties": { + "hostname": { + "description": "hostname is set for load-balancer ingress points that are DNS based.", + "type": "string" + }, + "ip": { + "description": "ip is set for load-balancer ingress points that are IP based.", + "type": "string" + }, + "ports": { + "description": "ports provides information about the ports exposed by this LoadBalancer.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.networking.v1.IngressPortStatus" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "io.k8s.api.networking.v1.IngressLoadBalancerStatus": { + "description": "IngressLoadBalancerStatus represents the status of a load-balancer.", + "type": "object", + "properties": { + "ingress": { + "description": "ingress is a list containing ingress points for the load-balancer.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.networking.v1.IngressLoadBalancerIngress" + } + } + } + }, + "io.k8s.api.networking.v1.IngressPortStatus": { + "description": "IngressPortStatus represents the error condition of a service port", + "type": "object", + "required": [ + "port", + "protocol" + ], + "properties": { + "error": { + "description": "error is to record the problem with the service port. The format of the error shall comply with the following rules: - built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.", + "type": "string" + }, + "port": { + "description": "port is the port number of the ingress port.", + "type": "integer", + "format": "int32" + }, + "protocol": { + "description": "protocol is the protocol of the ingress port. The supported values are: \"TCP\", \"UDP\", \"SCTP\"", + "type": "string" + } + } + }, + "io.k8s.api.networking.v1.IngressRule": { + "description": "IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.", + "type": "object", + "properties": { + "host": { + "description": "host is the fully qualified domain name of a network host, as defined by RFC 3986.
Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nhost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If host is precise, the request matches this rule if the http host header is equal to Host. 2. If host is a wildcard, then the request matches this rule if the http host header is equal to the suffix (removing the first label) of the wildcard rule.", + "type": "string" + }, + "http": { + "$ref": "#/definitions/io.k8s.api.networking.v1.HTTPIngressRuleValue" + } + } + }, + "io.k8s.api.networking.v1.IngressServiceBackend": { + "description": "IngressServiceBackend references a Kubernetes Service as a Backend.", + "type": "object", + "required": [ + "name" + ], + "properties": { + "name": { + "description": "name is the referenced service. The service must exist in the same namespace as the Ingress object.", + "type": "string" + }, + "port": { + "description": "port of the referenced service. A port name or port number is required for an IngressServiceBackend.", + "$ref": "#/definitions/io.k8s.api.networking.v1.ServiceBackendPort" + } + } + }, + "io.k8s.api.networking.v1.IngressSpec": { + "description": "IngressSpec describes the Ingress the user wishes to exist.", + "type": "object", + "properties": { + "defaultBackend": { + "description": "defaultBackend is the backend that should handle requests that don't match any rule. If Rules are not specified, DefaultBackend must be specified. If DefaultBackend is not set, the handling of requests that do not match any of the rules will be up to the Ingress controller.", + "$ref": "#/definitions/io.k8s.api.networking.v1.IngressBackend" + }, + "ingressClassName": { + "description": "ingressClassName is the name of an IngressClass cluster resource. Ingress controller implementations use this field to know whether they should be serving this Ingress resource, by a transitive connection (controller -\u003e IngressClass -\u003e Ingress resource). Although the `kubernetes.io/ingress.class` annotation (simple constant name) was never formally defined, it was widely supported by Ingress controllers to create a direct binding between Ingress controller and Ingress resources. Newly created Ingress resources should prefer using the field. However, even though the annotation is officially deprecated, for backwards compatibility reasons, ingress controllers should still honor that annotation if present.", + "type": "string" + }, + "rules": { + "description": "rules is a list of host rules used to configure the Ingress.
If unspecified, or no rule matches, all traffic is sent to the default backend.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.networking.v1.IngressRule" + }, + "x-kubernetes-list-type": "atomic" + }, + "tls": { + "description": "tls represents the TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.networking.v1.IngressTLS" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "io.k8s.api.networking.v1.IngressStatus": { + "description": "IngressStatus describes the current state of the Ingress.", + "type": "object", + "properties": { + "loadBalancer": { + "description": "loadBalancer contains the current status of the load-balancer.", + "$ref": "#/definitions/io.k8s.api.networking.v1.IngressLoadBalancerStatus" + } + } + }, + "io.k8s.api.networking.v1.IngressTLS": { + "description": "IngressTLS describes the transport layer security associated with an ingress.", + "type": "object", + "properties": { + "hosts": { + "description": "hosts is a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "atomic" + }, + "secretName": { + "description": "secretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and the value of the \"Host\" header is used for routing.", + "type": "string" + } + } + }, + "io.k8s.api.networking.v1.NetworkPolicy": { + "description": "NetworkPolicy describes what network traffic is allowed for a set of Pods", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "spec represents the specification of the desired behavior for this NetworkPolicy.", + "$ref": "#/definitions/io.k8s.api.networking.v1.NetworkPolicySpec" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "networking.k8s.io", + "kind": "NetworkPolicy", + "version": "v1" + } + ] + }, + "io.k8s.api.networking.v1.NetworkPolicyEgressRule": { + "description": "NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. This type is beta-level in 1.8", + "type": "object", + "properties": { + "ports": { + "description": "ports is a list of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.networking.v1.NetworkPolicyPort" + } + }, + "to": { + "description": "to is a list of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.networking.v1.NetworkPolicyPeer" + } + } + } + }, + "io.k8s.api.networking.v1.NetworkPolicyIngressRule": { + "description": "NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from.", + "type": "object", + "properties": { + "from": { + "description": "from is a list of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.networking.v1.NetworkPolicyPeer" + } + }, + "ports": { + "description": "ports is a list of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). 
If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.networking.v1.NetworkPolicyPort" + } + } + } + }, + "io.k8s.api.networking.v1.NetworkPolicyList": { + "description": "NetworkPolicyList is a list of NetworkPolicy objects.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is a list of schema objects.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.networking.v1.NetworkPolicy" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "networking.k8s.io", + "kind": "NetworkPolicyList", + "version": "v1" + } + ] + }, + "io.k8s.api.networking.v1.NetworkPolicyPeer": { + "description": "NetworkPolicyPeer describes a peer to allow traffic to/from. Only certain combinations of fields are allowed", + "type": "object", + "properties": { + "ipBlock": { + "description": "ipBlock defines policy on a particular IPBlock. If this field is set then neither of the other fields can be.", + "$ref": "#/definitions/io.k8s.api.networking.v1.IPBlock" + }, + "namespaceSelector": { + "description": "namespaceSelector selects namespaces using cluster-scoped labels. This field follows standard label selector semantics; if present but empty, it selects all namespaces.\n\nIf podSelector is also set, then the NetworkPolicyPeer as a whole selects the pods matching podSelector in the namespaces selected by namespaceSelector. Otherwise it selects all pods in the namespaces selected by namespaceSelector.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + }, + "podSelector": { + "description": "podSelector is a label selector which selects pods. This field follows standard label selector semantics; if present but empty, it selects all pods.\n\nIf namespaceSelector is also set, then the NetworkPolicyPeer as a whole selects the pods matching podSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects the pods matching podSelector in the policy's own namespace.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + } + } + }, + "io.k8s.api.networking.v1.NetworkPolicyPort": { + "description": "NetworkPolicyPort describes a port to allow traffic on", + "type": "object", + "properties": { + "endPort": { + "description": "endPort indicates that the range of ports from port to endPort if set, inclusive, should be allowed by the policy. 
This field cannot be defined if the port field is not defined or if the port field is defined as a named (string) port. The endPort must be equal to or greater than port.", + "type": "integer", + "format": "int32" + }, + "port": { + "description": "port represents the port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString" + }, + "protocol": { + "description": "protocol represents the protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP.", + "type": "string" + } + } + }, + "io.k8s.api.networking.v1.NetworkPolicySpec": { + "description": "NetworkPolicySpec provides the specification of a NetworkPolicy", + "type": "object", + "required": [ + "podSelector" + ], + "properties": { + "egress": { + "description": "egress is a list of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.networking.v1.NetworkPolicyEgressRule" + } + }, + "ingress": { + "description": "ingress is a list of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.networking.v1.NetworkPolicyIngressRule" + } + }, + "podSelector": { + "description": "podSelector selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + }, + "policyTypes": { + "description": "policyTypes is a list of rule types that the NetworkPolicy relates to. Valid options are [\"Ingress\"], [\"Egress\"], or [\"Ingress\", \"Egress\"]. If this field is not specified, it will default based on the existence of ingress or egress rules; policies that contain an egress section are assumed to affect egress, and all policies (whether or not they contain an ingress section) are assumed to affect ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ].
Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that includes \"Egress\" (since such a policy would not include an egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "io.k8s.api.networking.v1.ServiceBackendPort": { + "description": "ServiceBackendPort is the service port being referenced.", + "type": "object", + "properties": { + "name": { + "description": "name is the name of the port on the Service. This is a mutually exclusive setting with \"Number\".", + "type": "string" + }, + "number": { + "description": "number is the numerical port number (e.g. 80) on the Service. This is a mutually exclusive setting with \"Name\".", + "type": "integer", + "format": "int32" + } + } + }, + "io.k8s.api.networking.v1alpha1.IPAddress": { + "description": "IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. An IP address can be represented in different formats; to guarantee the uniqueness of the IP, the name of the object is the IP address in canonical format, four decimal digits separated by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 Invalid: 10.01.2.3 or 2001:db8:0:0:0::1", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "spec is the desired state of the IPAddress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.IPAddressSpec" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "networking.k8s.io", + "kind": "IPAddress", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.networking.v1alpha1.IPAddressList": { + "description": "IPAddressList contains a list of IPAddress.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of IPAddresses.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.IPAddress" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "networking.k8s.io", + "kind": "IPAddressList", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.networking.v1alpha1.IPAddressSpec": { + "description": "IPAddressSpec describes the attributes in an IP Address.", + "type": "object", + "properties": { + "parentRef": { + "description": "ParentRef references the resource that an IPAddress is attached to. An IPAddress must reference a parent object.", + "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ParentReference" + } + } + }, + "io.k8s.api.networking.v1alpha1.ParentReference": { + "description": "ParentReference describes a reference to a parent object.", + "type": "object", + "properties": { + "group": { + "description": "Group is the group of the object being referenced.", + "type": "string" + }, + "name": { + "description": "Name is the name of the object being referenced.", + "type": "string" + }, + "namespace": { + "description": "Namespace is the namespace of the object being referenced.", + "type": "string" + }, + "resource": { + "description": "Resource is the resource of the object being referenced.", + "type": "string" + } + } + }, + "io.k8s.api.networking.v1alpha1.ServiceCIDR": { + "description": "ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). This range is used to allocate ClusterIPs to Service objects.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "spec is the desired state of the ServiceCIDR.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ServiceCIDRSpec" + }, + "status": { + "description": "status represents the current state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ServiceCIDRStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "networking.k8s.io", + "kind": "ServiceCIDR", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.networking.v1alpha1.ServiceCIDRList": { + "description": "ServiceCIDRList contains a list of ServiceCIDR objects.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of ServiceCIDRs.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ServiceCIDR" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "networking.k8s.io", + "kind": "ServiceCIDRList", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.networking.v1alpha1.ServiceCIDRSpec": { + "description": "ServiceCIDRSpec defines the CIDRs the user wants to use for allocating ClusterIPs for Services.", + "type": "object", + "properties": { + "cidrs": { + "description": "CIDRs defines the IP blocks in CIDR notation (e.g. \"192.168.0.0/24\" or \"2001:db8::/64\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. This field is immutable.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "io.k8s.api.networking.v1alpha1.ServiceCIDRStatus": { + "description": "ServiceCIDRStatus describes the current state of the ServiceCIDR.", + "type": "object", + "properties": { + "conditions": { + "description": "conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR.
Current service state", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition" + }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + } + } + }, + "io.k8s.api.node.v1.Overhead": { + "description": "Overhead structure represents the resource overhead associated with running a pod.", + "type": "object", + "properties": { + "podFixed": { + "description": "podFixed represents the fixed resource overhead associated with running a pod.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + } + } + } + }, + "io.k8s.api.node.v1.RuntimeClass": { + "description": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://kubernetes.io/docs/concepts/containers/runtime-class/", + "type": "object", + "required": [ + "handler" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "handler": { + "description": "handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node \u0026 CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must be lowercase, conform to the DNS Label (RFC 1123) requirements, and is immutable.", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "overhead": { + "description": "overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see\n https://kubernetes.io/docs/concepts/scheduling-eviction/pod-overhead/", + "$ref": "#/definitions/io.k8s.api.node.v1.Overhead" + }, + "scheduling": { + "description": "scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. 
If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", + "$ref": "#/definitions/io.k8s.api.node.v1.Scheduling" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "node.k8s.io", + "kind": "RuntimeClass", + "version": "v1" + } + ] + }, + "io.k8s.api.node.v1.RuntimeClassList": { + "description": "RuntimeClassList is a list of RuntimeClass objects.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is a list of schema objects.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.node.v1.RuntimeClass" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "node.k8s.io", + "kind": "RuntimeClassList", + "version": "v1" + } + ] + }, + "io.k8s.api.node.v1.Scheduling": { + "description": "Scheduling specifies the scheduling constraints for nodes supporting a RuntimeClass.", + "type": "object", + "properties": { + "nodeSelector": { + "description": "nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. Any conflicts will cause the pod to be rejected in admission.", + "type": "object", + "additionalProperties": { + "type": "string" + }, + "x-kubernetes-map-type": "atomic" + }, + "tolerations": { + "description": "tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.Toleration" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "io.k8s.api.policy.v1.Eviction": { + "description": "Eviction evicts a pod from its node subject to certain policies and safety constraints. This is a subresource of Pod. A request to cause such an eviction is created by POSTing to .../pods/\u003cpod name\u003e/evictions.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "deleteOptions": { + "description": "DeleteOptions may be provided", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "ObjectMeta describes the pod that is being evicted.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "policy", + "kind": "Eviction", + "version": "v1" + } + ] + }, + "io.k8s.api.policy.v1.PodDisruptionBudget": { + "description": "PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "Specification of the desired behavior of the PodDisruptionBudget.", + "$ref": "#/definitions/io.k8s.api.policy.v1.PodDisruptionBudgetSpec" + }, + "status": { + "description": "Most recently observed status of the PodDisruptionBudget.", + "$ref": "#/definitions/io.k8s.api.policy.v1.PodDisruptionBudgetStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "policy", + "kind": "PodDisruptionBudget", + "version": "v1" + } + ] + }, + "io.k8s.api.policy.v1.PodDisruptionBudgetList": { + "description": "PodDisruptionBudgetList is a collection of PodDisruptionBudgets.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is a list of PodDisruptionBudgets", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.policy.v1.PodDisruptionBudget" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "policy", + "kind": "PodDisruptionBudgetList", + "version": "v1" + } + ] + }, + "io.k8s.api.policy.v1.PodDisruptionBudgetSpec": { + "description": "PodDisruptionBudgetSpec is a description of a PodDisruptionBudget.", + "type": "object", + "properties": { + "maxUnavailable": { + "description": "An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with \"minAvailable\".", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString" + }, + "minAvailable": { + "description": "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying \"100%\".", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.util.intstr.IntOrString" + }, + "selector": { + "description": "Label query over pods whose evictions are managed by the disruption budget. A null selector will match no pods, while an empty ({}) selector will select all pods within the namespace.", + "x-kubernetes-patch-strategy": "replace", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + }, + "unhealthyPodEvictionPolicy": { + "description": "UnhealthyPodEvictionPolicy defines the criteria for when unhealthy pods should be considered for eviction. The current implementation considers a pod healthy if it has a status.conditions item with type=\"Ready\", status=\"True\".\n\nValid policies are IfHealthyBudget and AlwaysAllow. If no policy is specified, the default behavior will be used, which corresponds to the IfHealthyBudget policy.\n\nIfHealthyBudget policy means that running pods (status.phase=\"Running\"), but not yet healthy can be evicted only if the guarded application is not disrupted (status.currentHealthy is at least equal to status.desiredHealthy). Healthy pods will be subject to the PDB for eviction.\n\nAlwaysAllow policy means that all running pods (status.phase=\"Running\"), but not yet healthy are considered disrupted and can be evicted regardless of whether the criteria in a PDB are met. This means prospective running pods of a disrupted application might not get a chance to become healthy. Healthy pods will be subject to the PDB for eviction.\n\nAdditional policies may be added in the future. Clients making eviction decisions should disallow eviction of unhealthy pods if they encounter an unrecognized policy in this field.\n\nThis field is beta-level. The eviction API uses this field when the feature gate PDBUnhealthyPodEvictionPolicy is enabled (enabled by default).", + "type": "string" + } + } + }, + "io.k8s.api.policy.v1.PodDisruptionBudgetStatus": { + "description": "PodDisruptionBudgetStatus represents information about the status of a PodDisruptionBudget. 
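A minimal PodDisruptionBudget illustrating the spec fields above might look like this sketch; the object name and pod label are hypothetical.

    apiVersion: policy/v1
    kind: PodDisruptionBudget
    metadata:
      name: web-pdb               # hypothetical
    spec:
      minAvailable: "50%"         # IntOrString; mutually exclusive with maxUnavailable
      selector:
        matchLabels:
          app: web                # hypothetical pod label
      unhealthyPodEvictionPolicy: IfHealthyBudget   # optional; this is the default behavior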
Status may trail the actual state of a system.", + "type": "object", + "required": [ + "disruptionsAllowed", + "currentHealthy", + "desiredHealthy", + "expectedPods" + ], + "properties": { + "conditions": { + "description": "Conditions contain conditions for PDB. The disruption controller sets the DisruptionAllowed condition. The following are known values for the reason field (additional reasons could be added in the future): - SyncFailed: The controller encountered an error and wasn't able to compute\n the number of allowed disruptions. Therefore no disruptions are\n allowed and the status of the condition will be False.\n- InsufficientPods: The number of pods is either at or below the number\n required by the PodDisruptionBudget. No disruptions are\n allowed and the status of the condition will be False.\n- SufficientPods: There are more pods than required by the PodDisruptionBudget.\n The condition will be True, and the number of allowed\n disruptions is provided by the disruptionsAllowed property.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition" + }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "type", + "x-kubernetes-patch-strategy": "merge" + }, + "currentHealthy": { + "description": "current number of healthy pods", + "type": "integer", + "format": "int32" + }, + "desiredHealthy": { + "description": "minimum desired number of healthy pods", + "type": "integer", + "format": "int32" + }, + "disruptedPods": { + "description": "DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by the PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there, it will be removed from the list automatically by the PodDisruptionBudget controller after some time. If everything goes smoothly, this map should be empty most of the time. A large number of entries in the map may indicate problems with pod deletions.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + } + }, + "disruptionsAllowed": { + "description": "Number of pod disruptions that are currently allowed.", + "type": "integer", + "format": "int32" + }, + "expectedPods": { + "description": "total number of pods counted by this disruption budget", + "type": "integer", + "format": "int32" + }, + "observedGeneration": { + "description": "Most recent generation observed when updating this PDB status. DisruptionsAllowed and other status information is valid only if observedGeneration equals the PDB's object generation.", + "type": "integer", + "format": "int64" + } + } + }, + "io.k8s.api.rbac.v1.AggregationRule": { + "description": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + "type": "object", + "properties": { + "clusterRoleSelectors": { + "description": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. 
If any of the selectors match, then the ClusterRole's permissions will be added", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + } + } + } + }, + "io.k8s.api.rbac.v1.ClusterRole": { + "description": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", + "type": "object", + "properties": { + "aggregationRule": { + "description": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", + "$ref": "#/definitions/io.k8s.api.rbac.v1.AggregationRule" + }, + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "rules": { + "description": "Rules holds all the PolicyRules for this ClusterRole", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.rbac.v1.PolicyRule" + } + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "rbac.authorization.k8s.io", + "kind": "ClusterRole", + "version": "v1" + } + ] + }, + "io.k8s.api.rbac.v1.ClusterRoleBinding": { + "description": "ClusterRoleBinding references a ClusterRole, but does not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject.", + "type": "object", + "required": [ + "roleRef" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "roleRef": { + "description": "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error. 
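A sketch of an aggregated ClusterRole using the AggregationRule defined above; the object name and selector label are hypothetical.

    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRole
    metadata:
      name: monitoring                                  # hypothetical
    aggregationRule:
      clusterRoleSelectors:
      - matchLabels:
          example.com/aggregate-to-monitoring: "true"   # hypothetical selector label
    rules: []                                           # controller-managed; filled in from matching ClusterRoles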
This field is immutable.", + "$ref": "#/definitions/io.k8s.api.rbac.v1.RoleRef" + }, + "subjects": { + "description": "Subjects holds references to the objects the role applies to.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.rbac.v1.Subject" + } + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "rbac.authorization.k8s.io", + "kind": "ClusterRoleBinding", + "version": "v1" + } + ] + }, + "io.k8s.api.rbac.v1.ClusterRoleBindingList": { + "description": "ClusterRoleBindingList is a collection of ClusterRoleBindings", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is a list of ClusterRoleBindings", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.rbac.v1.ClusterRoleBinding" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "rbac.authorization.k8s.io", + "kind": "ClusterRoleBindingList", + "version": "v1" + } + ] + }, + "io.k8s.api.rbac.v1.ClusterRoleList": { + "description": "ClusterRoleList is a collection of ClusterRoles", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is a list of ClusterRoles", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.rbac.v1.ClusterRole" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "rbac.authorization.k8s.io", + "kind": "ClusterRoleList", + "version": "v1" + } + ] + }, + "io.k8s.api.rbac.v1.PolicyRule": { + "description": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", + "type": "object", + "required": [ + "verbs" + ], + "properties": { + "apiGroups": { + "description": "APIGroups is the name of the APIGroup that contains the resources. 
If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \"\" represents the core API group and \"*\" represents all API groups.", + "type": "array", + "items": { + "type": "string" + } + }, + "nonResourceURLs": { + "description": "NonResourceURLs is a set of partial URLs that a user should have access to. *s are allowed, but only as the full, final step in the path. Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.", + "type": "array", + "items": { + "type": "string" + } + }, + "resourceNames": { + "description": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", + "type": "array", + "items": { + "type": "string" + } + }, + "resources": { + "description": "Resources is a list of resources this rule applies to. '*' represents all resources.", + "type": "array", + "items": { + "type": "string" + } + }, + "verbs": { + "description": "Verbs is a list of Verbs that apply to ALL the ResourceKinds contained in this rule. '*' represents all verbs.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "io.k8s.api.rbac.v1.Role": { + "description": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "rules": { + "description": "Rules holds all the PolicyRules for this Role", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.rbac.v1.PolicyRule" + } + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "rbac.authorization.k8s.io", + "kind": "Role", + "version": "v1" + } + ] + }, + "io.k8s.api.rbac.v1.RoleBinding": { + "description": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace.", + "type": "object", + "required": [ + "roleRef" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
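An illustrative Role combining the PolicyRule fields above; the object name and namespace are hypothetical.

    apiVersion: rbac.authorization.k8s.io/v1
    kind: Role
    metadata:
      name: pod-reader            # hypothetical
      namespace: default
    rules:
    - apiGroups: [""]             # "" selects the core API group
      resources: ["pods"]
      verbs: ["get", "list", "watch"]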
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "roleRef": { + "description": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error. This field is immutable.", + "$ref": "#/definitions/io.k8s.api.rbac.v1.RoleRef" + }, + "subjects": { + "description": "Subjects holds references to the objects the role applies to.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.rbac.v1.Subject" + } + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "rbac.authorization.k8s.io", + "kind": "RoleBinding", + "version": "v1" + } + ] + }, + "io.k8s.api.rbac.v1.RoleBindingList": { + "description": "RoleBindingList is a collection of RoleBindings", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is a list of RoleBindings", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.rbac.v1.RoleBinding" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "rbac.authorization.k8s.io", + "kind": "RoleBindingList", + "version": "v1" + } + ] + }, + "io.k8s.api.rbac.v1.RoleList": { + "description": "RoleList is a collection of Roles", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is a list of Roles", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.rbac.v1.Role" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
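A RoleBinding sketch tying the above together; the user and Role names are hypothetical.

    apiVersion: rbac.authorization.k8s.io/v1
    kind: RoleBinding
    metadata:
      name: read-pods             # hypothetical
      namespace: default
    subjects:
    - kind: User                  # could also be Group or ServiceAccount
      name: jane                  # hypothetical user
      apiGroup: rbac.authorization.k8s.io
    roleRef:                      # immutable once created
      apiGroup: rbac.authorization.k8s.io
      kind: Role
      name: pod-reader            # hypothetical Role in the same namespace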
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "rbac.authorization.k8s.io", + "kind": "RoleList", + "version": "v1" + } + ] + }, + "io.k8s.api.rbac.v1.RoleRef": { + "description": "RoleRef contains information that points to the role being used", + "type": "object", + "required": [ + "apiGroup", + "kind", + "name" + ], + "properties": { + "apiGroup": { + "description": "APIGroup is the group for the resource being referenced", + "type": "string" + }, + "kind": { + "description": "Kind is the type of resource being referenced", + "type": "string" + }, + "name": { + "description": "Name is the name of resource being referenced", + "type": "string" + } + }, + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.rbac.v1.Subject": { + "description": "Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, or a value for non-objects such as user and group names.", + "type": "object", + "required": [ + "kind", + "name" + ], + "properties": { + "apiGroup": { + "description": "APIGroup holds the API group of the referenced subject. Defaults to \"\" for ServiceAccount subjects. Defaults to \"rbac.authorization.k8s.io\" for User and Group subjects.", + "type": "string" + }, + "kind": { + "description": "Kind of object being referenced. Values defined by this API group are \"User\", \"Group\", and \"ServiceAccount\". If the Authorizer does not recognize the kind value, the Authorizer should report an error.", + "type": "string" + }, + "name": { + "description": "Name of the object being referenced.", + "type": "string" + }, + "namespace": { + "description": "Namespace of the referenced object. If the object kind is non-namespaced, such as \"User\" or \"Group\", and this value is not empty, the Authorizer should report an error.", + "type": "string" + } + }, + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.resource.v1alpha2.AllocationResult": { + "description": "AllocationResult contains attributes of an allocated resource.", + "type": "object", + "properties": { + "availableOnNodes": { + "description": "This field will get set by the resource driver after it has allocated the resource to inform the scheduler where it can schedule Pods using the ResourceClaim.\n\nSetting this field is optional. If null, the resource is available everywhere.", + "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelector" + }, + "resourceHandles": { + "description": "ResourceHandles contain the state associated with an allocation that should be maintained throughout the lifetime of a claim. Each ResourceHandle contains data that should be passed to a specific kubelet plugin once it lands on a node. This data is returned by the driver after a successful allocation and is opaque to Kubernetes. Driver documentation may explain to users how to interpret this data if needed.\n\nSetting this field is optional. It has a maximum size of 32 entries. If null (or empty), it is assumed this allocation will be processed by a single kubelet plugin with no ResourceHandle data attached. 
The name of the kubelet plugin invoked will match the DriverName set in the ResourceClaimStatus this AllocationResult is embedded in.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceHandle" + }, + "x-kubernetes-list-type": "atomic" + }, + "shareable": { + "description": "Shareable determines whether the resource supports more than one consumer at a time.", + "type": "boolean" + } + } + }, + "io.k8s.api.resource.v1alpha2.PodSchedulingContext": { + "description": "PodSchedulingContext objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "type": "object", + "required": [ + "spec" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "Spec describes where resources for the Pod are needed.", + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingContextSpec" + }, + "status": { + "description": "Status describes where resources for the Pod can be allocated.", + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingContextStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "resource.k8s.io", + "kind": "PodSchedulingContext", + "version": "v1alpha2" + } + ] + }, + "io.k8s.api.resource.v1alpha2.PodSchedulingContextList": { + "description": "PodSchedulingContextList is a collection of Pod scheduling objects.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is the list of PodSchedulingContext objects.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.PodSchedulingContext" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "resource.k8s.io", + "kind": "PodSchedulingContextList", + "version": "v1alpha2" + } + ] + }, + "io.k8s.api.resource.v1alpha2.PodSchedulingContextSpec": { + "description": "PodSchedulingContextSpec describes where resources for the Pod are needed.", + "type": "object", + "properties": { + "potentialNodes": { + "description": "PotentialNodes lists nodes where the Pod might be able to run.\n\nThe size of this field is limited to 128. This is large enough for many clusters. Larger clusters may need more attempts to find a node that suits all pending resources. This may get increased in the future, but not reduced.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "atomic" + }, + "selectedNode": { + "description": "SelectedNode is the node for which allocation of ResourceClaims that are referenced by the Pod and that use \"WaitForFirstConsumer\" allocation is to be attempted.", + "type": "string" + } + } + }, + "io.k8s.api.resource.v1alpha2.PodSchedulingContextStatus": { + "description": "PodSchedulingContextStatus describes where resources for the Pod can be allocated.", + "type": "object", + "properties": { + "resourceClaims": { + "description": "ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \"WaitForFirstConsumer\" allocation mode.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimSchedulingStatus" + }, + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map" + } + } + }, + "io.k8s.api.resource.v1alpha2.ResourceClaim": { + "description": "ResourceClaim describes which resources are needed by a resource consumer. Its status tracks whether the resource has been allocated and what the resulting attributes are.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "type": "object", + "required": [ + "spec" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "Spec describes the desired attributes of a resource that then needs to be allocated. 
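A minimal PodSchedulingContext sketch for the alpha spec and status fields above; the object name (which by convention matches the Pod) and node names are hypothetical.

    apiVersion: resource.k8s.io/v1alpha2
    kind: PodSchedulingContext
    metadata:
      name: my-pod                # matches the Pod's name (hypothetical)
      namespace: default
    spec:
      selectedNode: node-1        # hypothetical node chosen for delayed allocation
      potentialNodes:             # at most 128 entries
      - node-1
      - node-2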
It can only be set once when creating the ResourceClaim.", + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimSpec" + }, + "status": { + "description": "Status describes whether the resource is available and with which attributes.", + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "resource.k8s.io", + "kind": "ResourceClaim", + "version": "v1alpha2" + } + ] + }, + "io.k8s.api.resource.v1alpha2.ResourceClaimConsumerReference": { + "description": "ResourceClaimConsumerReference contains enough information to let you locate the consumer of a ResourceClaim. The user must be a resource in the same namespace as the ResourceClaim.", + "type": "object", + "required": [ + "resource", + "name", + "uid" + ], + "properties": { + "apiGroup": { + "description": "APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources.", + "type": "string" + }, + "name": { + "description": "Name is the name of resource being referenced.", + "type": "string" + }, + "resource": { + "description": "Resource is the type of resource being referenced, for example \"pods\".", + "type": "string" + }, + "uid": { + "description": "UID identifies exactly one incarnation of the resource.", + "type": "string" + } + } + }, + "io.k8s.api.resource.v1alpha2.ResourceClaimList": { + "description": "ResourceClaimList is a collection of claims.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is the list of resource claims.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaim" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "resource.k8s.io", + "kind": "ResourceClaimList", + "version": "v1alpha2" + } + ] + }, + "io.k8s.api.resource.v1alpha2.ResourceClaimParametersReference": { + "description": "ResourceClaimParametersReference contains enough information to let you locate the parameters for a ResourceClaim. The object must be in the same namespace as the ResourceClaim.", + "type": "object", + "required": [ + "kind", + "name" + ], + "properties": { + "apiGroup": { + "description": "APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources.", + "type": "string" + }, + "kind": { + "description": "Kind is the type of resource being referenced. 
This is the same value as in the parameter object's metadata, for example \"ConfigMap\".", + "type": "string" + }, + "name": { + "description": "Name is the name of resource being referenced.", + "type": "string" + } + } + }, + "io.k8s.api.resource.v1alpha2.ResourceClaimSchedulingStatus": { + "description": "ResourceClaimSchedulingStatus contains information about one particular ResourceClaim with \"WaitForFirstConsumer\" allocation mode.", + "type": "object", + "properties": { + "name": { + "description": "Name matches the pod.spec.resourceClaims[*].Name field.", + "type": "string" + }, + "unsuitableNodes": { + "description": "UnsuitableNodes lists nodes that the ResourceClaim cannot be allocated for.\n\nThe size of this field is limited to 128, the same as for PodSchedulingSpec.PotentialNodes. This may get increased in the future, but not reduced.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "atomic" + } + } + }, + "io.k8s.api.resource.v1alpha2.ResourceClaimSpec": { + "description": "ResourceClaimSpec defines how a resource is to be allocated.", + "type": "object", + "required": [ + "resourceClassName" + ], + "properties": { + "allocationMode": { + "description": "Allocation can start immediately or when a Pod wants to use the resource. \"WaitForFirstConsumer\" is the default.", + "type": "string" + }, + "parametersRef": { + "description": "ParametersRef references a separate object with arbitrary parameters that will be used by the driver when allocating a resource for the claim.\n\nThe object must be in the same namespace as the ResourceClaim.", + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimParametersReference" + }, + "resourceClassName": { + "description": "ResourceClassName references the driver and additional parameters via the name of a ResourceClass that was created as part of the driver deployment.", + "type": "string" + } + } + }, + "io.k8s.api.resource.v1alpha2.ResourceClaimStatus": { + "description": "ResourceClaimStatus tracks whether the resource has been allocated and what the resulting attributes are.", + "type": "object", + "properties": { + "allocation": { + "description": "Allocation is set by the resource driver once a resource or set of resources has been allocated successfully. If this is not specified, the resources have not been allocated yet.", + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.AllocationResult" + }, + "deallocationRequested": { + "description": "DeallocationRequested indicates that a ResourceClaim is to be deallocated.\n\nThe driver then must deallocate this claim and reset the field together with clearing the Allocation field.\n\nWhile DeallocationRequested is set, no new consumers may be added to ReservedFor.", + "type": "boolean" + }, + "driverName": { + "description": "DriverName is a copy of the driver name from the ResourceClass at the time when allocation started.", + "type": "string" + }, + "reservedFor": { + "description": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started.\n\nThere can be at most 32 such reservations. 
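A ResourceClaim sketch using the spec fields above; the claim and ResourceClass names are hypothetical, and this alpha API requires the DynamicResourceAllocation feature gate.

    apiVersion: resource.k8s.io/v1alpha2
    kind: ResourceClaim
    metadata:
      name: gpu-claim             # hypothetical
      namespace: default
    spec:
      resourceClassName: acme-gpu           # hypothetical ResourceClass name
      allocationMode: WaitForFirstConsumer  # optional; this is the default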
This may get increased in the future, but not reduced.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimConsumerReference" + }, + "x-kubernetes-list-map-keys": [ + "uid" + ], + "x-kubernetes-list-type": "map" + } + } + }, + "io.k8s.api.resource.v1alpha2.ResourceClaimTemplate": { + "description": "ResourceClaimTemplate is used to produce ResourceClaim objects.", + "type": "object", + "required": [ + "spec" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "Describes the ResourceClaim that is to be generated.\n\nThis field is immutable. A ResourceClaim will get created by the control plane for a Pod when needed and then not get updated anymore.", + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimTemplateSpec" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "resource.k8s.io", + "kind": "ResourceClaimTemplate", + "version": "v1alpha2" + } + ] + }, + "io.k8s.api.resource.v1alpha2.ResourceClaimTemplateList": { + "description": "ResourceClaimTemplateList is a collection of claim templates.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is the list of resource claim templates.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimTemplate" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "resource.k8s.io", + "kind": "ResourceClaimTemplateList", + "version": "v1alpha2" + } + ] + }, + "io.k8s.api.resource.v1alpha2.ResourceClaimTemplateSpec": { + "description": "ResourceClaimTemplateSpec contains the metadata and fields for a ResourceClaim.", + "type": "object", + "required": [ + "spec" + ], + "properties": { + "metadata": { + "description": "ObjectMeta may contain labels and annotations that will be copied into the ResourceClaim when creating it. 
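A ResourceClaimTemplate sketch per the definition above; the template name, label, and ResourceClass name are hypothetical.

    apiVersion: resource.k8s.io/v1alpha2
    kind: ResourceClaimTemplate
    metadata:
      name: gpu-claim-template    # hypothetical
      namespace: default
    spec:
      metadata:
        labels:
          app: training           # copied into each generated ResourceClaim
      spec:                       # same fields as ResourceClaimSpec
        resourceClassName: acme-gpu   # hypothetical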
No other fields are allowed and will be rejected during validation.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "Spec for the ResourceClaim. The entire content is copied unchanged into the ResourceClaim that gets created from this template. The same fields as in a ResourceClaim are also valid here.", + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClaimSpec" + } + } + }, + "io.k8s.api.resource.v1alpha2.ResourceClass": { + "description": "ResourceClass is used by administrators to influence how resources are allocated.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", + "type": "object", + "required": [ + "driverName" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "driverName": { + "description": "DriverName defines the name of the dynamic resource driver that is used for allocation of a ResourceClaim that uses this class.\n\nResource drivers have a unique name in forward domain order (acme.example.com).", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "parametersRef": { + "description": "ParametersRef references an arbitrary separate object that may hold parameters that will be used by the driver when allocating a resource that uses this class. A dynamic resource driver can distinguish between parameters stored here and those stored in ResourceClaimSpec.", + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClassParametersReference" + }, + "suitableNodes": { + "description": "Only nodes matching the selector will be considered by the scheduler when trying to find a Node that fits a Pod when that Pod uses a ResourceClaim that has not been allocated yet.\n\nSetting this field is optional. If null, all nodes are candidates.", + "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelector" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "resource.k8s.io", + "kind": "ResourceClass", + "version": "v1alpha2" + } + ] + }, + "io.k8s.api.resource.v1alpha2.ResourceClassList": { + "description": "ResourceClassList is a collection of classes.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
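A minimal ResourceClass sketch for the definition above; the class name and driver name are hypothetical.

    apiVersion: resource.k8s.io/v1alpha2
    kind: ResourceClass
    metadata:
      name: acme-gpu                   # hypothetical; referenced by ResourceClaimSpec.resourceClassName
    driverName: gpu.acme.example.com   # hypothetical; unique name in forward domain order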
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is the list of resource classes.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.resource.v1alpha2.ResourceClass" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "resource.k8s.io", + "kind": "ResourceClassList", + "version": "v1alpha2" + } + ] + }, + "io.k8s.api.resource.v1alpha2.ResourceClassParametersReference": { + "description": "ResourceClassParametersReference contains enough information to let you locate the parameters for a ResourceClass.", + "type": "object", + "required": [ + "kind", + "name" + ], + "properties": { + "apiGroup": { + "description": "APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources.", + "type": "string" + }, + "kind": { + "description": "Kind is the type of resource being referenced. This is the same value as in the parameter object's metadata.", + "type": "string" + }, + "name": { + "description": "Name is the name of resource being referenced.", + "type": "string" + }, + "namespace": { + "description": "Namespace that contains the referenced resource. Must be empty for cluster-scoped resources and non-empty for namespaced resources.", + "type": "string" + } + } + }, + "io.k8s.api.resource.v1alpha2.ResourceHandle": { + "description": "ResourceHandle holds opaque resource data for processing by a specific kubelet plugin.", + "type": "object", + "properties": { + "data": { + "description": "Data contains the opaque data associated with this ResourceHandle. It is set by the controller component of the resource driver whose name matches the DriverName set in the ResourceClaimStatus this ResourceHandle is embedded in. It is set at allocation time and is intended for processing by the kubelet plugin whose name matches the DriverName set in this ResourceHandle.\n\nThe maximum size of this field is 16KiB. This may get increased in the future, but not reduced.", + "type": "string" + }, + "driverName": { + "description": "DriverName specifies the name of the resource driver whose kubelet plugin should be invoked to process this ResourceHandle's data once it lands on a node. This may differ from the DriverName set in ResourceClaimStatus this ResourceHandle is embedded in.", + "type": "string" + } + } + }, + "io.k8s.api.scheduling.v1.PriorityClass": { + "description": "PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", + "type": "object", + "required": [ + "value" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "description": { + "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", + "type": "string" + }, + "globalDefault": { + "description": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClass exists with its `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", + "type": "boolean" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "preemptionPolicy": { + "description": "preemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset.", + "type": "string" + }, + "value": { + "description": "value represents the integer value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "type": "integer", + "format": "int32" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "scheduling.k8s.io", + "kind": "PriorityClass", + "version": "v1" + } + ] + }, + "io.k8s.api.scheduling.v1.PriorityClassList": { + "description": "PriorityClassList is a collection of priority classes.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of PriorityClasses", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.scheduling.v1.PriorityClass" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
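A PriorityClass sketch for the fields above; the name, value, and description text are hypothetical.

    apiVersion: scheduling.k8s.io/v1
    kind: PriorityClass
    metadata:
      name: high-priority         # hypothetical
    value: 1000000                # the integer priority pods receive
    globalDefault: false
    preemptionPolicy: PreemptLowerPriority              # optional; this is the default
    description: "Use for latency-critical services."   # hypothetical guidance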
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "scheduling.k8s.io", + "kind": "PriorityClassList", + "version": "v1" + } + ] + }, + "io.k8s.api.storage.v1.CSIDriver": { + "description": "CSIDriver captures information about a Container Storage Interface (CSI) volume driver deployed on the cluster. Kubernetes attach detach controller uses this object to determine whether attach is required. Kubelet uses this object to determine whether pod information needs to be passed on mount. CSIDriver objects are non-namespaced.", + "type": "object", + "required": [ + "spec" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object metadata. metadata.Name indicates the name of the CSI driver that this object refers to; it MUST be the same name returned by the CSI GetPluginName() call for that driver. The driver name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and alphanumerics between. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "spec represents the specification of the CSI Driver.", + "$ref": "#/definitions/io.k8s.api.storage.v1.CSIDriverSpec" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "storage.k8s.io", + "kind": "CSIDriver", + "version": "v1" + } + ] + }, + "io.k8s.api.storage.v1.CSIDriverList": { + "description": "CSIDriverList is a collection of CSIDriver objects.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of CSIDriver", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.storage.v1.CSIDriver" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "storage.k8s.io", + "kind": "CSIDriverList", + "version": "v1" + } + ] + }, + "io.k8s.api.storage.v1.CSIDriverSpec": { + "description": "CSIDriverSpec is the specification of a CSIDriver.", + "type": "object", + "properties": { + "attachRequired": { + "description": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.\n\nThis field is immutable.", + "type": "boolean" + }, + "fsGroupPolicy": { + "description": "fsGroupPolicy defines if the underlying volume supports changing ownership and permission of the volume before being mounted. Refer to the specific FSGroupPolicy values for additional details.\n\nThis field is immutable.\n\nDefaults to ReadWriteOnceWithFSType, which will examine each volume to determine if Kubernetes should modify ownership and permissions of the volume. With the default policy the defined fsGroup will only be applied if a fstype is defined and the volume's access mode contains ReadWriteOnce.", + "type": "string" + }, + "podInfoOnMount": { + "description": "podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations, if set to true. If set to false, pod information will not be passed on mount. Default is false.\n\nThe CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext.\n\nThe following VolumeContext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" if the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. 
As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.\n\nThis field is immutable.", + "type": "boolean" + }, + "requiresRepublish": { + "description": "requiresRepublish indicates the CSI driver wants `NodePublishVolume` being periodically called to reflect any possible change in the mounted volume. This field defaults to false.\n\nNote: After a successful initial NodePublishVolume call, subsequent calls to NodePublishVolume should only update the contents of the volume. New mount points will not be seen by a running container.", + "type": "boolean" + }, + "seLinuxMount": { + "description": "seLinuxMount specifies if the CSI driver supports \"-o context\" mount option.\n\nWhen \"true\", the CSI driver must ensure that all volumes provided by this CSI driver can be mounted separately with different `-o context` options. This is typical for storage backends that provide volumes as filesystems on block devices or as independent shared volumes. Kubernetes will call NodeStage / NodePublish with \"-o context=xyz\" mount option when mounting a ReadWriteOncePod volume used in Pod that has explicitly set SELinux context. In the future, it may be expanded to other volume AccessModes. In any case, Kubernetes will ensure that the volume is mounted only with a single SELinux context.\n\nWhen \"false\", Kubernetes won't pass any special SELinux mount options to the driver. This is typical for volumes that represent subdirectories of a bigger shared filesystem.\n\nDefault is \"false\".", + "type": "boolean" + }, + "storageCapacity": { + "description": "storageCapacity indicates that the CSI volume driver wants pod scheduling to consider the storage capacity that the driver deployment will report by creating CSIStorageCapacity objects with capacity information, if set to true.\n\nThe check can be enabled immediately when deploying a driver. In that case, provisioning new volumes with late binding will pause until the driver deployment has published some suitable CSIStorageCapacity object.\n\nAlternatively, the driver can be deployed with the field unset or false and it can be flipped later when storage capacity information has been published.\n\nThis field was immutable in Kubernetes \u003c= 1.22 and now is mutable.", + "type": "boolean" + }, + "tokenRequests": { + "description": "tokenRequests indicates the CSI driver needs pods' service account tokens it is mounting volume for to do necessary authentication. Kubelet will pass the tokens in VolumeContext in the CSI NodePublishVolume calls. The CSI driver should parse and validate the following VolumeContext: \"csi.storage.k8s.io/serviceAccount.tokens\": {\n \"\u003caudience\u003e\": {\n \"token\": \u003ctoken\u003e,\n \"expirationTimestamp\": \u003cexpiration timestamp in RFC3339\u003e,\n },\n ...\n}\n\nNote: Audience in each TokenRequest should be different and at most one token is empty string. To receive a new token after expiry, RequiresRepublish can be used to trigger NodePublishVolume periodically.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.storage.v1.TokenRequest" + }, + "x-kubernetes-list-type": "atomic" + }, + "volumeLifecycleModes": { + "description": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. 
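A CSIDriver sketch combining the CSIDriverSpec fields above; the driver name is hypothetical but must equal the name returned by the driver's CSI GetPluginName() call.

    apiVersion: storage.k8s.io/v1
    kind: CSIDriver
    metadata:
      name: csi.acme.example.com              # hypothetical driver name
    spec:
      attachRequired: true                    # driver implements ControllerPublishVolume
      podInfoOnMount: false
      fsGroupPolicy: ReadWriteOnceWithFSType  # optional; this is the default
      volumeLifecycleModes:
      - Persistent                            # the default mode when the list is empty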
The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism.\n\nThe other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume.\n\nFor more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.\n\nThis field is beta. This field is immutable.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-list-type": "set" + } + } + }, + "io.k8s.api.storage.v1.CSINode": { + "description": "CSINode holds information about all CSI drivers installed on a node. CSI drivers do not need to create the CSINode object directly. As long as they use the node-driver-registrar sidecar container, the kubelet will automatically populate the CSINode object for the CSI driver as part of kubelet plugin registration. CSINode has the same name as a node. If the object is missing, it means either there are no CSI Drivers available on the node, or the Kubelet version is low enough that it doesn't create this object. CSINode has an OwnerReference that points to the corresponding node object.", + "type": "object", + "required": [ + "spec" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. metadata.name must be the Kubernetes node name.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "spec is the specification of CSINode", + "$ref": "#/definitions/io.k8s.api.storage.v1.CSINodeSpec" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "storage.k8s.io", + "kind": "CSINode", + "version": "v1" + } + ] + }, + "io.k8s.api.storage.v1.CSINodeDriver": { + "description": "CSINodeDriver holds information about the specification of one CSI driver installed on a node", + "type": "object", + "required": [ + "name", + "nodeID" + ], + "properties": { + "allocatable": { + "description": "allocatable represents the volume resources of a node that are available for scheduling. This field is beta.", + "$ref": "#/definitions/io.k8s.api.storage.v1.VolumeNodeResources" + }, + "name": { + "description": "name represents the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.", + "type": "string" + }, + "nodeID": { + "description": "nodeID of the node from the driver point of view. 
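The CSIDriverSpec definition above is easiest to read alongside a concrete object. A minimal sketch of a CSIDriver manifest exercising those fields (the driver name hostpath.csi.k8s.io and all values are illustrative placeholders, not taken from this schema):

    apiVersion: storage.k8s.io/v1
    kind: CSIDriver
    metadata:
      name: hostpath.csi.k8s.io    # placeholder; must match the driver's GetPluginName()
    spec:
      attachRequired: false        # driver has no ControllerPublishVolume, so skip attach
      podInfoOnMount: true         # kubelet passes pod name/namespace/UID as VolumeContext
      fsGroupPolicy: File          # default is ReadWriteOnceWithFSType
      storageCapacity: true        # scheduler consults CSIStorageCapacity objects
      volumeLifecycleModes:        # immutable; an empty list means ["Persistent"]
        - Persistent
        - Ephemeral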
This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. For example, Kubernetes may refer to a given node as \"node1\", but the storage system may refer to the same node as \"nodeA\". When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. \"nodeA\" instead of \"node1\". This field is required.", + "type": "string" + }, + "topologyKeys": { + "description": "topologyKeys is the list of keys supported by the driver. When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. \"company.com/zone\", \"company.com/region\"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. It is possible for different nodes to use different topology keys. This can be empty if driver does not support topology.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "io.k8s.api.storage.v1.CSINodeList": { + "description": "CSINodeList is a collection of CSINode objects.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of CSINode", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.storage.v1.CSINode" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "storage.k8s.io", + "kind": "CSINodeList", + "version": "v1" + } + ] + }, + "io.k8s.api.storage.v1.CSINodeSpec": { + "description": "CSINodeSpec holds information about the specification of all CSI drivers installed on a node", + "type": "object", + "required": [ + "drivers" + ], + "properties": { + "drivers": { + "description": "drivers is a list of information of all CSI Drivers existing on a node. If all drivers in the list are uninstalled, this can become empty.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.storage.v1.CSINodeDriver" + }, + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge" + } + } + }, + "io.k8s.api.storage.v1.CSIStorageCapacity": { + "description": "CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. 
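CSINode objects are normally written by the kubelet during plugin registration rather than by hand; a sketch of what one looks like once a driver has registered (driver and node names are hypothetical):

    apiVersion: storage.k8s.io/v1
    kind: CSINode
    metadata:
      name: node-1                   # always the same name as the Node object
    spec:
      drivers:
        - name: hostpath.csi.k8s.io  # placeholder driver name
          nodeID: nodeA              # the storage system's own identifier for this node
          topologyKeys:
            - topology.hostpath.csi/node
          allocatable:
            count: 32                # max unique volumes this driver can use on the node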
This can be used when considering where to instantiate new PersistentVolumes.\n\nFor example this can express things like: - StorageClass \"standard\" has \"1234 GiB\" available in \"topology.kubernetes.io/zone=us-east1\" - StorageClass \"localssd\" has \"10 GiB\" available in \"kubernetes.io/hostname=knode-abc123\"\n\nThe following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero\n\nThe producer of these objects can decide which approach is more suitable.\n\nThey are consumed by the kube-scheduler when a CSI driver opts into capacity-aware scheduling with CSIDriverSpec.StorageCapacity. The scheduler compares the MaximumVolumeSize against the requested size of pending volumes to filter out unsuitable nodes. If MaximumVolumeSize is unset, it falls back to a comparison against the less precise Capacity. If that is also unset, the scheduler assumes that capacity is insufficient and tries some other node.", + "type": "object", + "required": [ + "storageClassName" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "capacity": { + "description": "capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThe semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "maximumVolumeSize": { + "description": "maximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.\n\nThis is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.api.resource.Quantity" + }, + "metadata": { + "description": "Standard object's metadata. The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). 
To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-\u003cuuid\u003e, a generated name, or a reverse-domain name which ends with the unique CSI driver name.\n\nObjects are namespaced.\n\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "nodeTopology": { + "description": "nodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector" + }, + "storageClassName": { + "description": "storageClassName represents the name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.", + "type": "string" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "storage.k8s.io", + "kind": "CSIStorageCapacity", + "version": "v1" + } + ] + }, + "io.k8s.api.storage.v1.CSIStorageCapacityList": { + "description": "CSIStorageCapacityList is a collection of CSIStorageCapacity objects.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of CSIStorageCapacity objects.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.storage.v1.CSIStorageCapacity" + }, + "x-kubernetes-list-map-keys": [ + "name" + ], + "x-kubernetes-list-type": "map" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "storage.k8s.io", + "kind": "CSIStorageCapacityList", + "version": "v1" + } + ] + }, + "io.k8s.api.storage.v1.StorageClass": { + "description": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", + "type": "object", + "required": [ + "provisioner" + ], + "properties": { + "allowVolumeExpansion": { + "description": "allowVolumeExpansion shows whether the storage class allow volume expand.", + "type": "boolean" + }, + "allowedTopologies": { + "description": "allowedTopologies restrict the node topologies where volumes can be dynamically provisioned. 
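Note that, unlike most objects in this group, CSIStorageCapacity is namespaced and carries its payload fields at the top level rather than under a spec. A sketch, with placeholder names, of the kind of object a driver deployment would publish:

    apiVersion: storage.k8s.io/v1
    kind: CSIStorageCapacity
    metadata:
      name: csisc-example            # csisc-<uuid>-style generated names are recommended
      namespace: storage-system      # placeholder namespace
    storageClassName: standard
    nodeTopology:
      matchLabels:
        topology.kubernetes.io/zone: us-east1
    capacity: 1234Gi                 # from the driver's GetCapacityResponse
    maximumVolumeSize: 512Gi         # optional, more precise bound for the scheduler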
Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.core.v1.TopologySelectorTerm" + }, + "x-kubernetes-list-type": "atomic" + }, + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "mountOptions": { + "description": "mountOptions controls the mountOptions for dynamically provisioned PersistentVolumes of this storage class. e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.", + "type": "array", + "items": { + "type": "string" + } + }, + "parameters": { + "description": "parameters holds the parameters for the provisioner that should create volumes of this storage class.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "provisioner": { + "description": "provisioner indicates the type of the provisioner.", + "type": "string" + }, + "reclaimPolicy": { + "description": "reclaimPolicy controls the reclaimPolicy for dynamically provisioned PersistentVolumes of this storage class. Defaults to Delete.", + "type": "string" + }, + "volumeBindingMode": { + "description": "volumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.", + "type": "string" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "storage.k8s.io", + "kind": "StorageClass", + "version": "v1" + } + ] + }, + "io.k8s.api.storage.v1.StorageClassList": { + "description": "StorageClassList is a collection of storage classes.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of StorageClasses", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.storage.v1.StorageClass" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
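StorageClass likewise keeps its fields at the top level (there is no spec). A minimal sketch using the fields above; the provisioner name and parameters are placeholders and are interpreted by the provisioner, not by Kubernetes:

    apiVersion: storage.k8s.io/v1
    kind: StorageClass
    metadata:
      name: fast-ssd                          # placeholder
    provisioner: ebs.csi.aws.com              # example CSI provisioner name
    parameters:                               # opaque, provisioner-specific strings
      type: gp3
    reclaimPolicy: Retain                     # defaults to Delete
    allowVolumeExpansion: true
    volumeBindingMode: WaitForFirstConsumer   # defaults to Immediate
    mountOptions:                             # unvalidated; bad options fail at mount time
      - noatime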
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "storage.k8s.io", + "kind": "StorageClassList", + "version": "v1" + } + ] + }, + "io.k8s.api.storage.v1.TokenRequest": { + "description": "TokenRequest contains parameters of a service account token.", + "type": "object", + "required": [ + "audience" + ], + "properties": { + "audience": { + "description": "audience is the intended audience of the token in \"TokenRequestSpec\". It will default to the audiences of kube apiserver.", + "type": "string" + }, + "expirationSeconds": { + "description": "expirationSeconds is the duration of validity of the token in \"TokenRequestSpec\". It has the same default value of \"ExpirationSeconds\" in \"TokenRequestSpec\".", + "type": "integer", + "format": "int64" + } + } + }, + "io.k8s.api.storage.v1.VolumeAttachment": { + "description": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", + "type": "object", + "required": [ + "spec" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "spec represents specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", + "$ref": "#/definitions/io.k8s.api.storage.v1.VolumeAttachmentSpec" + }, + "status": { + "description": "status represents status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", + "$ref": "#/definitions/io.k8s.api.storage.v1.VolumeAttachmentStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "storage.k8s.io", + "kind": "VolumeAttachment", + "version": "v1" + } + ] + }, + "io.k8s.api.storage.v1.VolumeAttachmentList": { + "description": "VolumeAttachmentList is a collection of VolumeAttachment objects.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
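VolumeAttachment objects are created by the attach/detach controller and completed by the CSI external-attacher, so users rarely write them, but a sketch helps relate the spec fields (all names hypothetical):

    apiVersion: storage.k8s.io/v1
    kind: VolumeAttachment
    metadata:
      name: csi-attachment-example   # real names are controller-generated hashes
    spec:
      attacher: hostpath.csi.k8s.io  # the driver that MUST handle this request
      nodeName: node-1
      source:
        persistentVolumeName: pv-0001   # exactly one source member may be set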
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of VolumeAttachments", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.storage.v1.VolumeAttachment" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "storage.k8s.io", + "kind": "VolumeAttachmentList", + "version": "v1" + } + ] + }, + "io.k8s.api.storage.v1.VolumeAttachmentSource": { + "description": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistentVolumes can be attached via an external attacher; in the future we may also allow inline volumes in pods. Exactly one member can be set.", + "type": "object", + "properties": { + "inlineVolumeSpec": { + "description": "inlineVolumeSpec contains all the information necessary to attach a persistent volume defined by a pod's inline VolumeSource. This field is populated only for the CSIMigration feature. It contains translated fields from a pod's inline VolumeSource to a PersistentVolumeSpec. This field is beta-level and is only honored by servers that enabled the CSIMigration feature.", + "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeSpec" + }, + "persistentVolumeName": { + "description": "persistentVolumeName represents the name of the persistent volume to attach.", + "type": "string" + } + } + }, + "io.k8s.api.storage.v1.VolumeAttachmentSpec": { + "description": "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", + "type": "object", + "required": [ + "attacher", + "source", + "nodeName" + ], + "properties": { + "attacher": { + "description": "attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", + "type": "string" + }, + "nodeName": { + "description": "nodeName represents the node that the volume should be attached to.", + "type": "string" + }, + "source": { + "description": "source represents the volume that should be attached.", + "$ref": "#/definitions/io.k8s.api.storage.v1.VolumeAttachmentSource" + } + } + }, + "io.k8s.api.storage.v1.VolumeAttachmentStatus": { + "description": "VolumeAttachmentStatus is the status of a VolumeAttachment request.", + "type": "object", + "required": [ + "attached" + ], + "properties": { + "attachError": { + "description": "attachError represents the last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "$ref": "#/definitions/io.k8s.api.storage.v1.VolumeError" + }, + "attached": { + "description": "attached indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e.
the external-attacher.", + "type": "boolean" + }, + "attachmentMetadata": { + "description": "attachmentMetadata is populated with any information returned by the attach operation, upon successful attach, that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "detachError": { + "description": "detachError represents the last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", + "$ref": "#/definitions/io.k8s.api.storage.v1.VolumeError" + } + } + }, + "io.k8s.api.storage.v1.VolumeError": { + "description": "VolumeError captures an error encountered during a volume operation.", + "type": "object", + "properties": { + "message": { + "description": "message represents the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", + "type": "string" + }, + "time": { + "description": "time represents the time the error was encountered.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + } + } + }, + "io.k8s.api.storage.v1.VolumeNodeResources": { + "description": "VolumeNodeResources is a set of resource limits for scheduling of volumes.", + "type": "object", + "properties": { + "count": { + "description": "count indicates the maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is not specified, then the supported number of volumes on this node is unbounded.", + "type": "integer", + "format": "int32" + } + } + }, + "io.k8s.api.storage.v1alpha1.VolumeAttributesClass": { + "description": "VolumeAttributesClass represents a specification of mutable volume attributes defined by the CSI driver. The class can be specified during dynamic provisioning of PersistentVolumeClaims, and changed in the PersistentVolumeClaim spec after provisioning.", + "type": "object", + "required": [ + "driverName" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "driverName": { + "description": "Name of the CSI driver This field is immutable.", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "parameters": { + "description": "parameters hold volume attributes defined by the CSI driver. 
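As with StorageClass, the VolumeAttributesClass payload sits at the top level. A minimal sketch with placeholder driver name and parameters (the keys shown are driver-defined, not part of this schema):

    apiVersion: storage.k8s.io/v1alpha1
    kind: VolumeAttributesClass
    metadata:
      name: gold-tier              # placeholder
    driverName: ebs.csi.aws.com    # immutable; example driver name
    parameters:                    # required; up to 512 opaque key/value pairs
      iops: "4000"
      throughput: "250"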
These values are opaque to Kubernetes and are passed directly to the CSI driver. The underlying storage provider supports changing these attributes on an existing volume; however, the parameters field itself is immutable. To invoke a volume update, a new VolumeAttributesClass should be created with new parameters, and the PersistentVolumeClaim should be updated to reference the new VolumeAttributesClass.\n\nThis field is required and must contain at least one key/value pair. The keys cannot be empty, and the maximum number of parameters is 512, with a cumulative max size of 256K. If the CSI driver rejects invalid parameters, the target PersistentVolumeClaim will be set to an \"Infeasible\" state in the modifyVolumeStatus field.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "storage.k8s.io", + "kind": "VolumeAttributesClass", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.storage.v1alpha1.VolumeAttributesClassList": { + "description": "VolumeAttributesClassList is a collection of VolumeAttributesClass objects.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items is the list of VolumeAttributesClass objects.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.api.storage.v1alpha1.VolumeAttributesClass" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "storage.k8s.io", + "kind": "VolumeAttributesClassList", + "version": "v1alpha1" + } + ] + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceColumnDefinition": { + "description": "CustomResourceColumnDefinition specifies a column for server side printing.", + "type": "object", + "required": [ + "name", + "type", + "jsonPath" + ], + "properties": { + "description": { + "description": "description is a human readable description of this column.", + "type": "string" + }, + "format": { + "description": "format is an optional OpenAPI type definition for this column. The 'name' format is applied to the primary identifier column to assist clients in identifying the column that is the resource name. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details.", + "type": "string" + }, + "jsonPath": { + "description": "jsonPath is a simple JSON path (i.e.
with array notation) which is evaluated against each custom resource to produce the value for this column.", + "type": "string" + }, + "name": { + "description": "name is a human readable name for the column.", + "type": "string" + }, + "priority": { + "description": "priority is an integer defining the relative importance of this column compared to others. Lower numbers are considered higher priority. Columns that may be omitted in limited space scenarios should be given a priority greater than 0.", + "type": "integer", + "format": "int32" + }, + "type": { + "description": "type is an OpenAPI type definition for this column. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for details.", + "type": "string" + } + } + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceConversion": { + "description": "CustomResourceConversion describes how to convert different versions of a CR.", + "type": "object", + "required": [ + "strategy" + ], + "properties": { + "strategy": { + "description": "strategy specifies how custom resources are converted between versions. Allowed values are: - `\"None\"`: The converter only changes the apiVersion and does not touch any other field in the custom resource. - `\"Webhook\"`: API Server will call an external webhook to do the conversion. Additional information\n is needed for this option. This requires spec.preserveUnknownFields to be false, and spec.conversion.webhook to be set.", + "type": "string" + }, + "webhook": { + "description": "webhook describes how to call the conversion webhook. Required when `strategy` is set to `\"Webhook\"`.", + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.WebhookConversion" + } + } + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinition": { + "description": "CustomResourceDefinition represents a resource that should be exposed on the API server. Its name MUST be in the format \u003c.spec.name\u003e.\u003c.spec.group\u003e.", + "type": "object", + "required": [ + "spec" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase.
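A sketch of how the conversion stanza is typically wired into a CRD spec when `strategy` is `Webhook`; the service coordinates are placeholders, and the webhook/clientConfig shape follows the WebhookConversion and ServiceReference definitions referenced by this schema:

    spec:
      conversion:
        strategy: Webhook                    # "None" only rewrites apiVersion
        webhook:
          conversionReviewVersions: ["v1"]
          clientConfig:
            service:
              namespace: crd-system          # placeholder
              name: conversion-webhook       # placeholder
              path: /convert
              port: 443                      # default port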
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + }, + "spec": { + "description": "spec describes how the user wants the resources to appear", + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionSpec" + }, + "status": { + "description": "status indicates the actual state of the CustomResourceDefinition", + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "apiextensions.k8s.io", + "kind": "CustomResourceDefinition", + "version": "v1" + } + ] + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionCondition": { + "description": "CustomResourceDefinitionCondition contains details for the current condition of this pod.", + "type": "object", + "required": [ + "type", + "status" + ], + "properties": { + "lastTransitionTime": { + "description": "lastTransitionTime last time the condition transitioned from one status to another.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "message": { + "description": "message is a human-readable message indicating details about last transition.", + "type": "string" + }, + "reason": { + "description": "reason is a unique, one-word, CamelCase reason for the condition's last transition.", + "type": "string" + }, + "status": { + "description": "status is the status of the condition. Can be True, False, Unknown.", + "type": "string" + }, + "type": { + "description": "type is the type of the condition. Types include Established, NamesAccepted and Terminating.", + "type": "string" + } + } + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionList": { + "description": "CustomResourceDefinitionList is a list of CustomResourceDefinition objects.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "items list individual CustomResourceDefinition objects", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinition" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "description": "Standard object's metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "apiextensions.k8s.io", + "kind": "CustomResourceDefinitionList", + "version": "v1" + } + ] + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionNames": { + "description": "CustomResourceDefinitionNames indicates the names to serve this CustomResourceDefinition", + "type": "object", + "required": [ + "plural", + "kind" + ], + "properties": { + "categories": { + "description": "categories is a list of grouped resources this custom resource belongs to (e.g. 'all'). This is published in API discovery documents, and used by clients to support invocations like `kubectl get all`.", + "type": "array", + "items": { + "type": "string" + } + }, + "kind": { + "description": "kind is the serialized kind of the resource. It is normally CamelCase and singular. Custom resource instances will use this value as the `kind` attribute in API calls.", + "type": "string" + }, + "listKind": { + "description": "listKind is the serialized kind of the list for this resource. Defaults to \"`kind`List\".", + "type": "string" + }, + "plural": { + "description": "plural is the plural name of the resource to serve. The custom resources are served under `/apis/\u003cgroup\u003e/\u003cversion\u003e/.../\u003cplural\u003e`. Must match the name of the CustomResourceDefinition (in the form `\u003cnames.plural\u003e.\u003cgroup\u003e`). Must be all lowercase.", + "type": "string" + }, + "shortNames": { + "description": "shortNames are short names for the resource, exposed in API discovery documents, and used by clients to support invocations like `kubectl get \u003cshortname\u003e`. It must be all lowercase.", + "type": "array", + "items": { + "type": "string" + } + }, + "singular": { + "description": "singular is the singular name of the resource. It must be all lowercase. Defaults to lowercased `kind`.", + "type": "string" + } + } + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionSpec": { + "description": "CustomResourceDefinitionSpec describes how a user wants their resource to appear", + "type": "object", + "required": [ + "group", + "names", + "scope", + "versions" + ], + "properties": { + "conversion": { + "description": "conversion defines conversion settings for the CRD.", + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceConversion" + }, + "group": { + "description": "group is the API group of the defined custom resource. The custom resources are served under `/apis/\u003cgroup\u003e/...`. Must match the name of the CustomResourceDefinition (in the form `\u003cnames.plural\u003e.\u003cgroup\u003e`).", + "type": "string" + }, + "names": { + "description": "names specify the resource and kind names for the custom resource.", + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionNames" + }, + "preserveUnknownFields": { + "description": "preserveUnknownFields indicates that object fields which are not specified in the OpenAPI schema should be preserved when persisting to storage. 
apiVersion, kind, metadata and known fields inside metadata are always preserved. This field is deprecated in favor of setting `x-preserve-unknown-fields` to true in `spec.versions[*].schema.openAPIV3Schema`. See https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#field-pruning for details.", + "type": "boolean" + }, + "scope": { + "description": "scope indicates whether the defined custom resource is cluster- or namespace-scoped. Allowed values are `Cluster` and `Namespaced`.", + "type": "string" + }, + "versions": { + "description": "versions is the list of all API versions of the defined custom resource. Version names are used to compute the order in which served versions are listed in API discovery. If the version string is \"kube-like\", it will sort above non \"kube-like\" version strings, which are ordered lexicographically. \"Kube-like\" versions start with a \"v\", then are followed by a number (the major version), then optionally the string \"alpha\" or \"beta\" and another number (the minor version). These are sorted first by GA \u003e beta \u003e alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major version, then minor version. An example sorted list of versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionVersion" + } + } + } + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionStatus": { + "description": "CustomResourceDefinitionStatus indicates the state of the CustomResourceDefinition", + "type": "object", + "properties": { + "acceptedNames": { + "description": "acceptedNames are the names that are actually being used to serve discovery. They may be different than the names in spec.", + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionNames" + }, + "conditions": { + "description": "conditions indicate state for particular aspects of a CustomResourceDefinition", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionCondition" + }, + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map" + }, + "storedVersions": { + "description": "storedVersions lists all versions of CustomResources that were ever persisted. Tracking these versions allows a migration path for stored versions in etcd. The field is mutable so a migration controller can finish a migration to another version (ensuring no old objects are left in storage), and then remove the rest of the versions from this list. Versions may not be removed from `spec.versions` while they exist in this list.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceDefinitionVersion": { + "description": "CustomResourceDefinitionVersion describes a version for CRD.", + "type": "object", + "required": [ + "name", + "served", + "storage" + ], + "properties": { + "additionalPrinterColumns": { + "description": "additionalPrinterColumns specifies additional columns returned in Table output. See https://kubernetes.io/docs/reference/using-api/api-concepts/#receiving-resources-as-tables for details. 
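Putting CustomResourceDefinitionSpec, CustomResourceDefinitionNames and CustomResourceDefinitionVersion together, a minimal working CRD looks like this sketch (group, kind and schema are the usual illustrative crontab example, not part of this file):

    apiVersion: apiextensions.k8s.io/v1
    kind: CustomResourceDefinition
    metadata:
      name: crontabs.example.com        # must be <names.plural>.<group>
    spec:
      group: example.com
      scope: Namespaced                 # or Cluster
      names:
        plural: crontabs
        singular: crontab
        kind: CronTab
        shortNames: [ct]
      versions:
        - name: v1
          served: true
          storage: true                 # exactly one version may set storage: true
          schema:
            openAPIV3Schema:
              type: object
              properties:
                spec:
                  type: object
                  properties:
                    cronSpec:
                      type: string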
If no columns are specified, a single column displaying the age of the custom resource is used.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceColumnDefinition" + } + }, + "deprecated": { + "description": "deprecated indicates this version of the custom resource API is deprecated. When set to true, API requests to this version receive a warning header in the server response. Defaults to false.", + "type": "boolean" + }, + "deprecationWarning": { + "description": "deprecationWarning overrides the default warning returned to API clients. May only be set when `deprecated` is true. The default warning indicates this version is deprecated and recommends use of the newest served version of equal or greater stability, if one exists.", + "type": "string" + }, + "name": { + "description": "name is the version name, e.g. “v1”, “v2beta1”, etc. The custom resources are served under this version at `/apis/\u003cgroup\u003e/\u003cversion\u003e/...` if `served` is true.", + "type": "string" + }, + "schema": { + "description": "schema describes the schema used for validation, pruning, and defaulting of this version of the custom resource.", + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceValidation" + }, + "served": { + "description": "served is a flag enabling/disabling this version from being served via REST APIs", + "type": "boolean" + }, + "storage": { + "description": "storage indicates this version should be used when persisting custom resources to storage. There must be exactly one version with storage=true.", + "type": "boolean" + }, + "subresources": { + "description": "subresources specify what subresources this version of the defined custom resource have.", + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceSubresources" + } + } + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceSubresourceScale": { + "description": "CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources.", + "type": "object", + "required": [ + "specReplicasPath", + "statusReplicasPath" + ], + "properties": { + "labelSelectorPath": { + "description": "labelSelectorPath defines the JSON path inside of a custom resource that corresponds to Scale `status.selector`. Only JSON paths without the array notation are allowed. Must be a JSON Path under `.status` or `.spec`. Must be set to work with HorizontalPodAutoscaler. The field pointed by this JSON path must be a string field (not a complex selector struct) which contains a serialized label selector in string form. More info: https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions#scale-subresource If there is no value under the given path in the custom resource, the `status.selector` value in the `/scale` subresource will default to the empty string.", + "type": "string" + }, + "specReplicasPath": { + "description": "specReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `spec.replicas`. Only JSON paths without the array notation are allowed. Must be a JSON Path under `.spec`. 
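The printer-column and subresource definitions above slot into a single spec.versions[] entry; a sketch of such a fragment (paths assume the crontab-style example used earlier):

    additionalPrinterColumns:
      - name: Schedule
        type: string
        jsonPath: .spec.cronSpec
      - name: Age
        type: date
        jsonPath: .metadata.creationTimestamp
    subresources:
      status: {}                   # enables the /status endpoint
      scale:
        specReplicasPath: .spec.replicas
        statusReplicasPath: .status.replicas
        labelSelectorPath: .status.labelSelector   # required for HorizontalPodAutoscaler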
If there is no value under the given path in the custom resource, the `/scale` subresource will return an error on GET.", + "type": "string" + }, + "statusReplicasPath": { + "description": "statusReplicasPath defines the JSON path inside of a custom resource that corresponds to Scale `status.replicas`. Only JSON paths without the array notation are allowed. Must be a JSON Path under `.status`. If there is no value under the given path in the custom resource, the `status.replicas` value in the `/scale` subresource will default to 0.", + "type": "string" + } + } + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceSubresourceStatus": { + "description": "CustomResourceSubresourceStatus defines how to serve the status subresource for CustomResources. Status is represented by the `.status` JSON path inside of a CustomResource. When set, * exposes a /status subresource for the custom resource * PUT requests to the /status subresource take a custom resource object, and ignore changes to anything except the status stanza * PUT/POST/PATCH requests to the custom resource ignore changes to the status stanza", + "type": "object" + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceSubresources": { + "description": "CustomResourceSubresources defines the status and scale subresources for CustomResources.", + "type": "object", + "properties": { + "scale": { + "description": "scale indicates the custom resource should serve a `/scale` subresource that returns an `autoscaling/v1` Scale object.", + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceSubresourceScale" + }, + "status": { + "description": "status indicates the custom resource should serve a `/status` subresource. When enabled: 1. requests to the custom resource primary endpoint ignore changes to the `status` stanza of the object. 2. requests to the custom resource `/status` subresource ignore changes to anything other than the `status` stanza of the object.", + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceSubresourceStatus" + } + } + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.CustomResourceValidation": { + "description": "CustomResourceValidation is a list of validation methods for CustomResources.", + "type": "object", + "properties": { + "openAPIV3Schema": { + "description": "openAPIV3Schema is the OpenAPI v3 schema to use for validation and pruning.", + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps" + } + } + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.ExternalDocumentation": { + "description": "ExternalDocumentation allows referencing an external resource for extended documentation.", + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "url": { + "type": "string" + } + } + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSON": { + "description": "JSON represents any valid JSON value. These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil." 
+ }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps": { + "description": "JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema.org/).", + "type": "object", + "properties": { + "$ref": { + "type": "string" + }, + "$schema": { + "type": "string" + }, + "additionalItems": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrBool" + }, + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrBool" + }, + "allOf": { + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps" + } + }, + "anyOf": { + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps" + } + }, + "default": { + "description": "default is a default value for undefined object fields. Defaulting is a beta feature under the CustomResourceDefaulting feature gate. Defaulting requires spec.preserveUnknownFields to be false.", + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSON" + }, + "definitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps" + } + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrStringArray" + } + }, + "description": { + "type": "string" + }, + "enum": { + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSON" + } + }, + "example": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSON" + }, + "exclusiveMaximum": { + "type": "boolean" + }, + "exclusiveMinimum": { + "type": "boolean" + }, + "externalDocs": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.ExternalDocumentation" + }, + "format": { + "description": "format is an OpenAPI v3 format string. Unknown formats are ignored. The following formats are validated:\n\n- bsonobjectid: a bson object ID, i.e. a 24 characters hex string - uri: an URI as parsed by Golang net/url.ParseRequestURI - email: an email address as parsed by Golang net/mail.ParseAddress - hostname: a valid representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034]. 
- ipv4: an IPv4 IP as parsed by Golang net.ParseIP - ipv6: an IPv6 IP as parsed by Golang net.ParseIP - cidr: a CIDR as parsed by Golang net.ParseCIDR - mac: a MAC address as parsed by Golang net.ParseMAC - uuid: a UUID that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - uuid3: a UUID3 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?3[0-9a-f]{3}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - uuid4: a UUID4 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?4[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - uuid5: a UUID5 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?5[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - isbn: an ISBN10 or ISBN13 number string like \"0321751043\" or \"978-0321751041\" - isbn10: an ISBN10 number string like \"0321751043\" - isbn13: an ISBN13 number string like \"978-0321751041\" - creditcard: a credit card number defined by the regex ^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$ with any non-digit characters mixed in - ssn: a U.S. social security number following the regex ^\\d{3}[- ]?\\d{2}[- ]?\\d{4}$ - hexcolor: a hexadecimal color code like \"#FFFFFF\" following the regex ^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$ - rgbcolor: an RGB color code like \"rgb(255,255,255)\" - byte: base64 encoded binary data - password: any kind of string - date: a date string like \"2006-01-02\" as defined by full-date in RFC3339 - duration: a duration string like \"22 ns\" as parsed by Golang time.ParseDuration or compatible with Scala duration format - datetime: a date time string like \"2014-12-15T19:30:20.000Z\" as defined by date-time in RFC3339.", + "type": "string" + }, + "id": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrArray" + }, + "maxItems": { + "type": "integer", + "format": "int64" + }, + "maxLength": { + "type": "integer", + "format": "int64" + }, + "maxProperties": { + "type": "integer", + "format": "int64" + }, + "maximum": { + "type": "number", + "format": "double" + }, + "minItems": { + "type": "integer", + "format": "int64" + }, + "minLength": { + "type": "integer", + "format": "int64" + }, + "minProperties": { + "type": "integer", + "format": "int64" + }, + "minimum": { + "type": "number", + "format": "double" + }, + "multipleOf": { + "type": "number", + "format": "double" + }, + "not": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps" + }, + "nullable": { + "type": "boolean" + }, + "oneOf": { + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps" + } + }, + "pattern": { + "type": "string" + }, + "patternProperties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps" + } + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaProps" + } + }, + "required": { + "type": "array", + "items": { + "type": "string" + } + }, + "title": { + "type": "string" + }, + "type": { + "type": "string" + }, + "uniqueItems": { + "type": "boolean" + }, + "x-kubernetes-embedded-resource": { + "description":
"x-kubernetes-embedded-resource defines that the value is an embedded Kubernetes runtime.Object, with TypeMeta and ObjectMeta. The type must be object. It is allowed to further restrict the embedded object. kind, apiVersion and metadata are validated automatically. x-kubernetes-preserve-unknown-fields is allowed to be true, but does not have to be if the object is fully specified (up to kind, apiVersion, metadata).", + "type": "boolean" + }, + "x-kubernetes-int-or-string": { + "description": "x-kubernetes-int-or-string specifies that this value is either an integer or a string. If this is true, an empty type is allowed and type as child of anyOf is permitted if following one of the following patterns:\n\n1) anyOf:\n - type: integer\n - type: string\n2) allOf:\n - anyOf:\n - type: integer\n - type: string\n - ... zero or more", + "type": "boolean" + }, + "x-kubernetes-list-map-keys": { + "description": "x-kubernetes-list-map-keys annotates an array with the x-kubernetes-list-type `map` by specifying the keys used as the index of the map.\n\nThis tag MUST only be used on lists that have the \"x-kubernetes-list-type\" extension set to \"map\". Also, the values specified for this attribute must be a scalar typed field of the child structure (no nesting is supported).\n\nThe properties specified must either be required or have a default value, to ensure those properties are present for all list items.", + "type": "array", + "items": { + "type": "string" + } + }, + "x-kubernetes-list-type": { + "description": "x-kubernetes-list-type annotates an array to further describe its topology. This extension must only be used on lists and may have 3 possible values:\n\n1) `atomic`: the list is treated as a single entity, like a scalar.\n Atomic lists will be entirely replaced when updated. This extension\n may be used on any type of list (struct, scalar, ...).\n2) `set`:\n Sets are lists that must not have multiple items with the same value. Each\n value must be a scalar, an object with x-kubernetes-map-type `atomic` or an\n array with x-kubernetes-list-type `atomic`.\n3) `map`:\n These lists are like maps in that their elements have a non-index key\n used to identify them. Order is preserved upon merge. The map tag\n must only be used on a list with elements of type object.\nDefaults to atomic for arrays.", + "type": "string" + }, + "x-kubernetes-map-type": { + "description": "x-kubernetes-map-type annotates an object to further describe its topology. This extension must only be used when type is object and may have 2 possible values:\n\n1) `granular`:\n These maps are actual maps (key-value pairs) and each fields are independent\n from each other (they can each be manipulated by separate actors). This is\n the default behaviour for all maps.\n2) `atomic`: the list is treated as a single entity, like a scalar.\n Atomic maps will be entirely replaced when updated.", + "type": "string" + }, + "x-kubernetes-preserve-unknown-fields": { + "description": "x-kubernetes-preserve-unknown-fields stops the API server decoding step from pruning fields which are not specified in the validation schema. This affects fields recursively, but switches back to normal pruning behaviour if nested properties or additionalProperties are specified in the schema. This can either be true or undefined. False is forbidden.", + "type": "boolean" + }, + "x-kubernetes-validations": { + "description": "x-kubernetes-validations describes a list of validation rules written in the CEL expression language. This field is an alpha-level. 
Using this field requires the feature gate `CustomResourceValidationExpressions` to be enabled.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.ValidationRule" + }, + "x-kubernetes-list-map-keys": [ + "rule" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "rule", + "x-kubernetes-patch-strategy": "merge" + } + } + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrArray": { + "description": "JSONSchemaPropsOrArray represents a value that can either be a JSONSchemaProps or an array of JSONSchemaProps. Mainly here for serialization purposes." + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrBool": { + "description": "JSONSchemaPropsOrBool represents JSONSchemaProps or a boolean value. Defaults to true for the boolean property." + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSONSchemaPropsOrStringArray": { + "description": "JSONSchemaPropsOrStringArray represents a JSONSchemaProps or a string array." + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.ServiceReference": { + "description": "ServiceReference holds a reference to Service.legacy.k8s.io", + "type": "object", + "required": [ + "namespace", + "name" + ], + "properties": { + "name": { + "description": "name is the name of the service. Required", + "type": "string" + }, + "namespace": { + "description": "namespace is the namespace of the service. Required", + "type": "string" + }, + "path": { + "description": "path is an optional URL path at which the webhook will be contacted.", + "type": "string" + }, + "port": { + "description": "port is an optional service port at which the webhook will be contacted. `port` should be a valid port number (1-65535, inclusive). Defaults to 443 for backward compatibility.", + "type": "integer", + "format": "int32" + } + } + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.ValidationRule": { + "description": "ValidationRule describes a validation rule written in the CEL expression language.", + "type": "object", + "required": [ + "rule" + ], + "properties": { + "fieldPath": { + "description": "fieldPath represents the field path returned when the validation fails. It must be a relative JSON path (i.e. with array notation) scoped to the location of this x-kubernetes-validations extension in the schema, and it must refer to an existing field. E.g. when a validation checks a specific attribute `foo` under a map `testMap`, the fieldPath could be set to `.testMap.foo`. If the validation checks that two lists must have unique attributes, the fieldPath could be set to either of the lists, e.g. `.testList`. Numeric list indexes are not supported. It currently supports child operations to refer to an existing field. Refer to [JSONPath support in Kubernetes](https://kubernetes.io/docs/reference/kubectl/jsonpath/) for more info. For a field name which contains special characters, use `['specialName']` to refer to the field name, e.g. for an attribute `foo.34$` that appears in a list `testList`, the fieldPath could be set to `.testList['foo.34$']`", + "type": "string" + }, + "message": { + "description": "Message represents the message displayed when validation fails. The message is required if the Rule contains line breaks. The message must not contain line breaks. If unset, the message is \"failed rule: {Rule}\". e.g.
\"must be a URL with the host matching spec.host\"", + "type": "string" + }, + "messageExpression": { + "description": "MessageExpression declares a CEL expression that evaluates to the validation failure message that is returned when this rule fails. Since messageExpression is used as a failure message, it must evaluate to a string. If both message and messageExpression are present on a rule, then messageExpression will be used if validation fails. If messageExpression results in a runtime error, the runtime error is logged, and the validation failure message is produced as if the messageExpression field were unset. If messageExpression evaluates to an empty string, a string with only spaces, or a string that contains line breaks, then the validation failure message will also be produced as if the messageExpression field were unset, and the fact that messageExpression produced an empty string/string with only spaces/string with line breaks will be logged. messageExpression has access to all the same variables as the rule; the only difference is the return type. Example: \"x must be less than max (\"+string(self.max)+\")\"", + "type": "string" + }, + "optionalOldSelf": { + "description": "optionalOldSelf is used to opt a transition rule into evaluation even when the object is first created, or if the old object is missing the value.\n\nWhen enabled `oldSelf` will be a CEL optional whose value will be `None` if there is no old value, or when the object is initially created.\n\nYou may check for presence of oldSelf using `oldSelf.hasValue()` and unwrap it after checking using `oldSelf.value()`. Check the CEL documentation for Optional types for more information: https://pkg.go.dev/github.com/google/cel-go/cel#OptionalTypes\n\nMay not be set unless `oldSelf` is used in `rule`.", + "type": "boolean" + }, + "reason": { + "description": "reason provides a machine-readable validation failure reason that is returned to the caller when a request fails this validation rule. The HTTP status code returned to the caller will match the reason of the reason of the first failed validation rule. The currently supported reasons are: \"FieldValueInvalid\", \"FieldValueForbidden\", \"FieldValueRequired\", \"FieldValueDuplicate\". If not set, default to use \"FieldValueInvalid\". All future added reasons must be accepted by clients when reading this value and unknown reasons should be treated as FieldValueInvalid.", + "type": "string" + }, + "rule": { + "description": "Rule represents the expression which will be evaluated by CEL. ref: https://github.com/google/cel-spec The Rule is scoped to the location of the x-kubernetes-validations extension in the schema. The `self` variable in the CEL expression is bound to the scoped value. Example: - Rule scoped to the root of a resource with a status subresource: {\"rule\": \"self.status.actual \u003c= self.spec.maxDesired\"}\n\nIf the Rule is scoped to an object with properties, the accessible properties of the object are field selectable via `self.field` and field presence can be checked via `has(self.field)`. Null valued fields are treated as absent fields in CEL expressions. If the Rule is scoped to an object with additionalProperties (i.e. a map) the value of the map are accessible via `self[mapKey]`, map containment can be checked via `mapKey in self` and all entries of the map are accessible via CEL macros and functions such as `self.all(...)`. 
If the Rule is scoped to an array, the elements of the array are accessible via `self[i]` and also by macros and functions. If the Rule is scoped to a scalar, `self` is bound to the scalar value. Examples: - Rule scoped to a map of objects: {\"rule\": \"self.components['Widget'].priority \u003c 10\"} - Rule scoped to a list of integers: {\"rule\": \"self.values.all(value, value \u003e= 0 \u0026\u0026 value \u003c 100)\"} - Rule scoped to a string value: {\"rule\": \"self.startsWith('kube')\"}\n\nThe `apiVersion`, `kind`, `metadata.name` and `metadata.generateName` are always accessible from the root of the object and from any x-kubernetes-embedded-resource annotated objects. No other metadata properties are accessible.\n\nUnknown data preserved in custom resources via x-kubernetes-preserve-unknown-fields is not accessible in CEL expressions. This includes: - Unknown field values that are preserved by object schemas with x-kubernetes-preserve-unknown-fields. - Object properties where the property schema is of an \"unknown type\". An \"unknown type\" is recursively defined as:\n - A schema with no type and x-kubernetes-preserve-unknown-fields set to true\n - An array where the items schema is of an \"unknown type\"\n - An object where the additionalProperties schema is of an \"unknown type\"\n\nOnly property names of the form `[a-zA-Z_.-/][a-zA-Z0-9_.-/]*` are accessible. Accessible property names are escaped according to the following rules when accessed in the expression: - '__' escapes to '__underscores__' - '.' escapes to '__dot__' - '-' escapes to '__dash__' - '/' escapes to '__slash__' - Property names that exactly match a CEL RESERVED keyword escape to '__{keyword}__'. The keywords are:\n\t \"true\", \"false\", \"null\", \"in\", \"as\", \"break\", \"const\", \"continue\", \"else\", \"for\", \"function\", \"if\",\n\t \"import\", \"let\", \"loop\", \"package\", \"namespace\", \"return\".\nExamples:\n - Rule accessing a property named \"namespace\": {\"rule\": \"self.__namespace__ \u003e 0\"}\n - Rule accessing a property named \"x-prop\": {\"rule\": \"self.x__dash__prop \u003e 0\"}\n - Rule accessing a property named \"redact__d\": {\"rule\": \"self.redact__underscores__d \u003e 0\"}\n\nEquality on arrays with x-kubernetes-list-type of 'set' or 'map' ignores element order, i.e. [1, 2] == [2, 1]. Concatenation on arrays with x-kubernetes-list-type uses the semantics of the list type:\n - 'set': `X + Y` performs a union where the array positions of all elements in `X` are preserved and\n non-intersecting elements in `Y` are appended, retaining their partial order.\n - 'map': `X + Y` performs a merge where the array positions of all keys in `X` are preserved but the values\n are overwritten by values in `Y` when the key sets of `X` and `Y` intersect. Elements in `Y` with\n non-intersecting keys are appended, retaining their partial order.\n\nIf `rule` makes use of the `oldSelf` variable it is implicitly a `transition rule`.\n\nBy default, the `oldSelf` variable is the same type as `self`. When `optionalOldSelf` is true, the `oldSelf` variable is a CEL optional\n variable whose value() is the same type as `self`.\nSee the documentation for the `optionalOldSelf` field for details.\n\nTransition rules by default are applied only on UPDATE requests and are skipped if an old value could not be found.
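For example, a hypothetical transition rule that forbids shrinking a `replicas` field (assuming such an integer property exists at the scoped location) could be written as:

    # Illustrative x-kubernetes-validations entry; referencing `oldSelf`
    # makes this a transition rule, so by default it runs only on UPDATE.
    x-kubernetes-validations:
      - rule: "self.replicas >= oldSelf.replicas"
        message: "replicas may not be decreased"
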
You can opt a transition rule into unconditional evaluation by setting `optionalOldSelf` to true.", + "type": "string" + } + } + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.WebhookClientConfig": { + "description": "WebhookClientConfig contains the information to make a TLS connection with the webhook.", + "type": "object", + "properties": { + "caBundle": { + "description": "caBundle is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.", + "type": "string", + "format": "byte" + }, + "service": { + "description": "service is a reference to the service for this webhook. Either service or url must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.", + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.ServiceReference" + }, + "url": { + "description": "url gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", + "type": "string" + } + } + }, + "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.WebhookConversion": { + "description": "WebhookConversion describes how to call a conversion webhook", + "type": "object", + "required": [ + "conversionReviewVersions" + ], + "properties": { + "clientConfig": { + "description": "clientConfig is the instructions for how to call the webhook if strategy is `Webhook`.", + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.WebhookClientConfig" + }, + "conversionReviewVersions": { + "description": "conversionReviewVersions is an ordered list of preferred `ConversionReview` versions the Webhook expects. The API server will use the first version in the list which it supports. If none of the versions specified in this list are supported by API server, conversion will fail for the custom resource. If a persisted Webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "io.k8s.apimachinery.pkg.api.resource.Quantity": { + "description": "Quantity is a fixed-point representation of a number. 
It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n``` \u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n\n\t(Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \"+\" | \"-\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n\n\t(International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n\n\t(Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e ```\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will be rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n\n- No precision is lost - No fractional digits will be emitted - The exponent (or suffix) is as large as possible.\n\nThe sign will be omitted unless the number is negative.\n\nExamples:\n\n- 1.5 will be serialized as \"1500m\" - 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", + "type": "string" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup": { + "description": "APIGroup contains the name, the supported versions, and the preferred version of a group.", + "type": "object", + "required": [ + "name", + "versions" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase.
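Tying the Quantity grammar above to practice, the following illustrative resource requests show non-canonical spellings and the canonical forms they re-serialize to:

    # Illustrative values; the field layout is the usual pod resources stanza.
    resources:
      requests:
        cpu: "0.1"       # re-serialized canonically as "100m"
        memory: "1.5Gi"  # re-serialized canonically as "1536Mi"
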
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "name": { + "description": "name is the name of the group.", + "type": "string" + }, + "preferredVersion": { + "description": "preferredVersion is the version preferred by the API server, which probably is the storage version.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery" + }, + "serverAddressByClientCIDRs": { + "description": "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR" + } + }, + "versions": { + "description": "versions are the versions supported in this group.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery" + } + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "APIGroup", + "version": "v1" + } + ] + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.APIGroupList": { + "description": "APIGroupList is a list of APIGroup, to allow clients to discover the API at /apis.", + "type": "object", + "required": [ + "groups" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "groups": { + "description": "groups is a list of APIGroup.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIGroup" + } + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "APIGroupList", + "version": "v1" + } + ] + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.APIResource": { + "description": "APIResource specifies the name of a resource and whether it is namespaced.", + "type": "object", + "required": [ + "name", + "singularName", + "namespaced", + "kind", + "verbs" + ], + "properties": { + "categories": { + "description": "categories is a list of the grouped resources this resource belongs to (e.g. 'all')", + "type": "array", + "items": { + "type": "string" + } + }, + "group": { + "description": "group is the preferred group of the resource. Empty implies the group of the containing resource list. 
For subresources, this may have a different value, for example: \"Scale\".", + "type": "string" + }, + "kind": { + "description": "kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')", + "type": "string" + }, + "name": { + "description": "name is the plural name of the resource.", + "type": "string" + }, + "namespaced": { + "description": "namespaced indicates if a resource is namespaced or not.", + "type": "boolean" + }, + "shortNames": { + "description": "shortNames is a list of suggested short names of the resource.", + "type": "array", + "items": { + "type": "string" + } + }, + "singularName": { + "description": "singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.", + "type": "string" + }, + "storageVersionHash": { + "description": "The hash value of the storage version, the version this resource is converted to when written to the data store. Value must be treated as opaque by clients. Only equality comparison on the value is valid. This is an alpha feature and may change or be removed in the future. The field is populated by the apiserver only if the StorageVersionHash feature gate is enabled. This field will remain optional even if it graduates.", + "type": "string" + }, + "verbs": { + "description": "verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)", + "type": "array", + "items": { + "type": "string" + } + }, + "version": { + "description": "version is the preferred version of the resource. Empty implies the version of the containing resource list. For subresources, this may have a different value, for example: \"v1\" (while inside a v1beta1 version of the core resource's group).", + "type": "string" + } + } + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList": { + "description": "APIResourceList is a list of APIResource; it is used to expose the name of the resources supported in a specific group and version, and whether the resource is namespaced.", + "type": "object", + "required": [ + "groupVersion", + "resources" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "groupVersion": { + "description": "groupVersion is the group and version this APIResourceList is for.", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase.
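As a sketch of what the APIResource/APIResourceList shapes above describe, a discovery response for this repo's Sensor CRD might look like the following (values are illustrative):

    # Illustrative discovery output for argoproj.io/v1alpha1.
    kind: APIResourceList
    apiVersion: v1
    groupVersion: argoproj.io/v1alpha1
    resources:
      - name: sensors
        singularName: sensor
        namespaced: true
        kind: Sensor
        verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
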
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "resources": { + "description": "resources contains the name of the resources and if they are namespaced.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResource" + } + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "APIResourceList", + "version": "v1" + } + ] + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.APIVersions": { + "description": "APIVersions lists the versions that are available, to allow clients to discover the API at /api, which is the root path of the legacy v1 API.", + "type": "object", + "required": [ + "versions", + "serverAddressByClientCIDRs" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "serverAddressByClientCIDRs": { + "description": "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR" + } + }, + "versions": { + "description": "versions are the api versions that are available.", + "type": "array", + "items": { + "type": "string" + } + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "APIVersions", + "version": "v1" + } + ] + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.Condition": { + "description": "Condition contains details for one aspect of the current state of this API Resource.", + "type": "object", + "required": [ + "type", + "status", + "lastTransitionTime", + "reason", + "message" + ], + "properties": { + "lastTransitionTime": { + "description": "lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + }, + "message": { + "description": "message is a human readable message indicating details about the transition. This may be an empty string.", + "type": "string" + }, + "observedGeneration": { + "description": "observedGeneration represents the .metadata.generation that the condition was set based upon. 
For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance.", + "type": "integer", + "format": "int64" + }, + "reason": { + "description": "reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty.", + "type": "string" + }, + "status": { + "description": "status of the condition, one of True, False, Unknown.", + "type": "string" + }, + "type": { + "description": "type of condition in CamelCase or in foo.example.com/CamelCase.", + "type": "string" + } + } + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions": { + "description": "DeleteOptions may be provided when deleting an API object.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "dryRun": { + "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "type": "array", + "items": { + "type": "string" + } + }, + "gracePeriodSeconds": { + "description": "The duration in seconds before the object should be deleted. Value must be a non-negative integer; the value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per-object value if not specified.", + "type": "integer", + "format": "int64" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "orphanDependents": { + "description": "Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", + "type": "boolean" + }, + "preconditions": { + "description": "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Preconditions" + }, + "propagationPolicy": { + "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.
Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", + "type": "string" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "admission.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "admission.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "admissionregistration.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "admissionregistration.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, + { + "group": "admissionregistration.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "apiextensions.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "apiextensions.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "apiregistration.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "apiregistration.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "apps", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "apps", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "apps", + "kind": "DeleteOptions", + "version": "v1beta2" + }, + { + "group": "authentication.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "authentication.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, + { + "group": "authentication.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "authorization.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "authorization.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "autoscaling", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "autoscaling", + "kind": "DeleteOptions", + "version": "v2" + }, + { + "group": "autoscaling", + "kind": "DeleteOptions", + "version": "v2beta1" + }, + { + "group": "autoscaling", + "kind": "DeleteOptions", + "version": "v2beta2" + }, + { + "group": "batch", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "batch", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "certificates.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "certificates.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, + { + "group": "certificates.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "coordination.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "coordination.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "discovery.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "discovery.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "events.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "events.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "extensions", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "DeleteOptions", + 
"version": "v1beta2" + }, + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta3" + }, + { + "group": "imagepolicy.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, + { + "group": "internal.apiserver.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, + { + "group": "networking.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "networking.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, + { + "group": "networking.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "node.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "node.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, + { + "group": "node.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "policy", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "policy", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "rbac.authorization.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "rbac.authorization.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, + { + "group": "rbac.authorization.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "resource.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha2" + }, + { + "group": "scheduling.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "scheduling.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, + { + "group": "scheduling.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "storage.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "storage.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, + { + "group": "storage.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + } + ] + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1": { + "description": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:\u003cname\u003e', where \u003cname\u003e is the name of a field in a struct, or key in a map 'v:\u003cvalue\u003e', where \u003cvalue\u003e is the exact json formatted value of a list item 'i:\u003cindex\u003e', where \u003cindex\u003e is position of a item in a list 'k:\u003ckeys\u003e', where \u003ckeys\u003e is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff", + "type": "object" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery": { + "description": "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensibility.", + "type": "object", + "required": [ + "groupVersion", + "version" + ], + "properties": { + "groupVersion": { + "description": "groupVersion specifies the API group and version in the form \"group/version\"", + "type": "string" + }, + "version": { + "description": "version specifies the version in the form of \"version\". 
This is to save the clients the trouble of splitting the GroupVersion.", + "type": "string" + } + } + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelector": { + "description": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", + "type": "object", + "properties": { + "matchExpressions": { + "description": "matchExpressions is a list of label selector requirements. The requirements are ANDed.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement" + } + }, + "matchLabels": { + "description": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.", "type": "object", - "properties": { - "defaultMode": { - "description": "Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", - "type": "integer", - "format": "int32" - }, - "items": { - "description": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", - "type": "array", - "items": { - "description": "Maps a string key to a path within a volume.", - "type": "object", - "required": [ - "key", - "path" - ], - "properties": { - "key": { - "description": "The key to project.", - "type": "string" - }, - "mode": { - "description": "Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", - "type": "integer", - "format": "int32" - }, - "path": { - "description": "The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.", - "type": "string" - } - } - } - }, - "name": { - "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", - "type": "string" - }, - "optional": { - "description": "Specify whether the ConfigMap or its keys must be defined", - "type": "boolean" - } + "additionalProperties": { + "type": "string" + } + } + }, + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement": { + "description": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", + "type": "object", + "required": [ + "key", + "operator" + ], + "properties": { + "key": { + "description": "key is the label key that the selector applies to.", + "type": "string" + }, + "operator": { + "description": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.", + "type": "string" + }, + "values": { + "description": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", + "type": "array", + "items": { + "type": "string" } + } + } + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta": { + "description": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.", + "type": "object", + "properties": { + "continue": { + "description": "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.", + "type": "string" + }, + "remainingItemCount": { + "description": "remainingItemCount is the number of subsequent items in the list which are not included in this list response. If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. Servers older than v1.15 do not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection. Clients should not rely on the remainingItemCount to be set or to be exact.", + "type": "integer", + "format": "int64" + }, + "resourceVersion": { + "description": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. 
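The LabelSelector and LabelSelectorRequirement shapes above combine in a manifest like this (labels are illustrative; recall that the two clauses are ANDed):

    selector:
      matchLabels:
        app: my-app            # shorthand for: key `app`, operator In, values ["my-app"]
      matchExpressions:
        - key: environment
          operator: In
          values:
            - staging
            - production
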
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", + "type": "string" + }, + "selfLink": { + "description": "Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.", + "type": "string" + } + } + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry": { + "description": "ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the version of this resource that this field set applies to. The format is \"group/version\" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.", + "type": "string" }, - "csi": { - "description": "Represents a source location of a volume to mount, managed by an external CSI driver", - "type": "object", - "required": [ - "driver" - ], - "properties": { - "driver": { - "description": "Driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster.", - "type": "string" - }, - "fsType": { - "description": "Filesystem type to mount. Ex. \"ext4\", \"xfs\", \"ntfs\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.", - "type": "string" - }, - "nodePublishSecretRef": { - "description": "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.", - "type": "object", - "properties": { - "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", - "type": "string" - } - } - }, - "readOnly": { - "description": "Specifies a read-only configuration for the volume. Defaults to false (read/write).", - "type": "boolean" - }, - "volumeAttributes": { - "description": "VolumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.", - "type": "object", - "additionalProperties": { - "type": "string" - } - } - } + "fieldsType": { + "description": "FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: \"FieldsV1\"", + "type": "string" }, - "downwardAPI": { - "description": "DownwardAPIVolumeSource represents a volume containing downward API info. Downward API volumes support ownership management and SELinux relabeling.", - "type": "object", - "properties": { - "defaultMode": { - "description": "Optional: mode bits to use on created files by default. Must be a Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", - "type": "integer", - "format": "int32" - }, - "items": { - "description": "Items is a list of downward API volume file", - "type": "array", - "items": { - "description": "DownwardAPIVolumeFile represents information to create the file containing the pod field", - "type": "object", - "required": [ - "path" - ], - "properties": { - "fieldRef": { - "description": "ObjectFieldSelector selects an APIVersioned field of an object.", - "type": "object", - "required": [ - "fieldPath" - ], - "properties": { - "apiVersion": { - "description": "Version of the schema the FieldPath is written in terms of, defaults to \"v1\".", - "type": "string" - }, - "fieldPath": { - "description": "Path of the field to select in the specified API version.", - "type": "string" - } - } - }, - "mode": { - "description": "Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", - "type": "integer", - "format": "int32" - }, - "path": { - "description": "Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'", - "type": "string" - }, - "resourceFieldRef": { - "description": "ResourceFieldSelector represents container resources (cpu, memory) and their output format", - "type": "object", - "required": [ - "resource" - ], - "properties": { - "containerName": { - "description": "Container name: required for volumes, optional for env vars", - "type": "string" - }, - "divisor": { - "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n (Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \"+\" | \"-\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) 
This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n a. No precision is lost\n b. No fractional digits will be emitted\n c. The exponent (or suffix) is as large as possible.\nThe sign will be omitted unless the number is negative.\n\nExamples:\n 1.5 will be serialized as \"1500m\"\n 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", - "type": "string" - }, - "resource": { - "description": "Required: resource to select", - "type": "string" - } - } - } - } - } - } - } + "fieldsV1": { + "description": "FieldsV1 holds the first JSON version format as described in the \"FieldsV1\" type.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1" }, - "emptyDir": { - "description": "Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling.", - "type": "object", - "properties": { - "medium": { - "description": "What type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", - "type": "string" - }, - "sizeLimit": { - "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n (Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \"+\" | \"-\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) 
This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n a. No precision is lost\n b. No fractional digits will be emitted\n c. The exponent (or suffix) is as large as possible.\nThe sign will be omitted unless the number is negative.\n\nExamples:\n 1.5 will be serialized as \"1500m\"\n 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", - "type": "string" - } - } + "manager": { + "description": "Manager is an identifier of the workflow managing these fields.", + "type": "string" }, - "ephemeral": { - "description": "Represents an ephemeral volume that is handled by a normal storage driver.", - "type": "object", - "properties": { - "readOnly": { - "description": "Specifies a read-only configuration for the volume. Defaults to false (read/write).", - "type": "boolean" - }, - "volumeClaimTemplate": { - "description": "PersistentVolumeClaimTemplate is used to produce PersistentVolumeClaim objects as part of an EphemeralVolumeSource.", - "type": "object", - "required": [ - "spec" - ], - "properties": { - "metadata": { - "description": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.", - "type": "object", - "properties": { - "annotations": { - "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "clusterName": { - "description": "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.", - "type": "string" - }, - "creationTimestamp": { - "description": "Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.", - "type": "string", - "format": "date-time" - }, - "deletionGracePeriodSeconds": { - "description": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.", - "type": "integer", - "format": "int64" - }, - "deletionTimestamp": { - "description": "Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. 
Wrappers are provided for many of the factory methods that the time package offers.", - "type": "string", - "format": "date-time" - }, - "finalizers": { - "description": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.", - "type": "array", - "items": { - "type": "string" - }, - "x-kubernetes-patch-strategy": "merge" - }, - "generateName": { - "description": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency", - "type": "string" - }, - "generation": { - "description": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.", - "type": "integer", - "format": "int64" - }, - "labels": { - "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "managedFields": { - "description": "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.", - "type": "array", - "items": { - "description": "ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to.", - "type": "object", - "properties": { - "apiVersion": { - "description": "APIVersion defines the version of this resource that this field set applies to. The format is \"group/version\" just like the top-level APIVersion field. 
It is necessary to track the version of a field set because it cannot be automatically converted.", - "type": "string" - }, - "fieldsType": { - "description": "FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: \"FieldsV1\"", - "type": "string" - }, - "fieldsV1": { - "description": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:\u003cname\u003e', where \u003cname\u003e is the name of a field in a struct, or key in a map 'v:\u003cvalue\u003e', where \u003cvalue\u003e is the exact json formatted value of a list item 'i:\u003cindex\u003e', where \u003cindex\u003e is position of a item in a list 'k:\u003ckeys\u003e', where \u003ckeys\u003e is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff", - "type": "object" - }, - "manager": { - "description": "Manager is an identifier of the workflow managing these fields.", - "type": "string" - }, - "operation": { - "description": "Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.", - "type": "string" - }, - "time": { - "description": "Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.", - "type": "string", - "format": "date-time" - } - } - } - }, - "name": { - "description": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "type": "string" - }, - "namespace": { - "description": "Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", - "type": "string" - }, - "ownerReferences": { - "description": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", - "type": "array", - "items": { - "description": "OwnerReference contains enough information to let you identify an owning object. 
An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.", - "type": "object", - "required": [ - "apiVersion", - "kind", - "name", - "uid" - ], - "properties": { - "apiVersion": { - "description": "API version of the referent.", - "type": "string" - }, - "blockOwnerDeletion": { - "description": "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.", - "type": "boolean" - }, - "controller": { - "description": "If true, this reference points to the managing controller.", - "type": "boolean" - }, - "kind": { - "description": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "type": "string" - }, - "name": { - "description": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "type": "string" - }, - "uid": { - "description": "UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", - "type": "string" - } - } - }, - "x-kubernetes-patch-merge-key": "uid", - "x-kubernetes-patch-strategy": "merge" - }, - "resourceVersion": { - "description": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", - "type": "string" - }, - "selfLink": { - "description": "SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.", - "type": "string" - }, - "uid": { - "description": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", - "type": "string" - } - } - }, - "spec": { - "description": "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes", - "type": "object", - "properties": { - "accessModes": { - "description": "AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", - "type": "array", - "items": { - "type": "string" - } - }, - "dataSource": { - "description": "TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace.", - "type": "object", - "required": [ - "kind", - "name" - ], - "properties": { - "apiGroup": { - "description": "APIGroup is the group for the resource being referenced. 
If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", - "type": "string" - }, - "kind": { - "description": "Kind is the type of resource being referenced", - "type": "string" - }, - "name": { - "description": "Name is the name of resource being referenced", - "type": "string" - } - } - }, - "resources": { - "description": "ResourceRequirements describes the compute resource requirements.", - "type": "object", - "properties": { - "limits": { - "description": "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", - "type": "object", - "additionalProperties": { - "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n (Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \"+\" | \"-\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n a. No precision is lost\n b. No fractional digits will be emitted\n c. The exponent (or suffix) is as large as possible.\nThe sign will be omitted unless the number is negative.\n\nExamples:\n 1.5 will be serialized as \"1500m\"\n 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. 
(So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", - "type": "string" - } - }, - "requests": { - "description": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", - "type": "object", - "additionalProperties": { - "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n (Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \"+\" | \"-\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n a. No precision is lost\n b. No fractional digits will be emitted\n c. The exponent (or suffix) is as large as possible.\nThe sign will be omitted unless the number is negative.\n\nExamples:\n 1.5 will be serialized as \"1500m\"\n 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", - "type": "string" - } - } - } - }, - "selector": { - "description": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. 
An empty label selector matches all objects. A null label selector matches no objects.", - "type": "object", - "properties": { - "matchExpressions": { - "description": "matchExpressions is a list of label selector requirements. The requirements are ANDed.", - "type": "array", - "items": { - "description": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", - "type": "object", - "required": [ - "key", - "operator" - ], - "properties": { - "key": { - "description": "key is the label key that the selector applies to.", - "type": "string", - "x-kubernetes-patch-merge-key": "key", - "x-kubernetes-patch-strategy": "merge" - }, - "operator": { - "description": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.", - "type": "string" - }, - "values": { - "description": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", - "type": "array", - "items": { - "type": "string" - } - } - } - } - }, - "matchLabels": { - "description": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.", - "type": "object", - "additionalProperties": { - "type": "string" - } - } - } - }, - "storageClassName": { - "description": "Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", - "type": "string" - }, - "volumeMode": { - "description": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.", - "type": "string" - }, - "volumeName": { - "description": "VolumeName is the binding reference to the PersistentVolume backing this claim.", - "type": "string" - } - } - } - } - } - } + "operation": { + "description": "Operation is the type of operation which led to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.", + "type": "string" },
ReadOnly here will force the ReadOnly setting in VolumeMounts.", - "type": "boolean" - }, - "targetWWNs": { - "description": "Optional: FC target worldwide names (WWNs)", - "type": "array", - "items": { - "type": "string" - } - }, - "wwids": { - "description": "Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.", - "type": "array", - "items": { - "type": "string" - } - } - } + "subresource": { + "description": "Subresource is the name of the subresource used to update that object, or empty string if the object was updated through the main resource. The value of this field is used to distinguish between managers, even if they share the same name. For example, a status update will be distinct from a regular update using the same manager name. Note that the APIVersion field is not related to the Subresource field and it always corresponds to the version of the main resource.", + "type": "string" }, - "flexVolume": { - "description": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + "time": { + "description": "Time is the timestamp of when the ManagedFields entry was added. The timestamp will also be updated if a field is added, the manager changes any of the owned fields value or removes a field. The timestamp does not update when a field is removed from the entry because another manager took it over.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + } + } + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.MicroTime": { + "description": "MicroTime is version of Time with microsecond level precision.", + "type": "string", + "format": "date-time" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta": { + "description": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.", + "type": "object", + "properties": { + "annotations": { + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations", "type": "object", - "required": [ - "driver" - ], - "properties": { - "driver": { - "description": "Driver is the name of the driver to use for this volume.", - "type": "string" - }, - "fsType": { - "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.", - "type": "string" - }, - "options": { - "description": "Optional: Extra command options if any.", - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "readOnly": { - "description": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", - "type": "boolean" - }, - "secretRef": { - "description": "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.", - "type": "object", - "properties": { - "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", - "type": "string" - } - } - } + "additionalProperties": { + "type": "string" } }, - "flocker": { - "description": "Represents a Flocker volume mounted by the Flocker agent. 
One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling.", - "type": "object", - "properties": { - "datasetName": { - "description": "Name of the dataset stored as metadata -\u003e name on the dataset for Flocker should be considered as deprecated", - "type": "string" - }, - "datasetUUID": { - "description": "UUID of the dataset. This is unique identifier of a Flocker dataset", - "type": "string" - } - } + "creationTimestamp": { + "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" }, - "gcePersistentDisk": { - "description": "Represents a Persistent Disk resource in Google Compute Engine.\n\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling.", - "type": "object", - "required": [ - "pdName" - ], - "properties": { - "fsType": { - "description": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", - "type": "string" - }, - "partition": { - "description": "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", - "type": "integer", - "format": "int32" - }, - "pdName": { - "description": "Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", - "type": "string" - }, - "readOnly": { - "description": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", - "type": "boolean" - } - } + "deletionGracePeriodSeconds": { + "description": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.", + "type": "integer", + "format": "int64" }, - "gitRepo": { - "description": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", - "type": "object", - "required": [ - "repository" - ], - "properties": { - "directory": { - "description": "Target directory name. 
Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.", - "type": "string" - }, - "repository": { - "description": "Repository URL", - "type": "string" - }, - "revision": { - "description": "Commit hash for the specified revision.", - "type": "string" - } - } + "deletionTimestamp": { + "description": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" }, - "glusterfs": { - "description": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", - "type": "object", - "required": [ - "endpoints", - "path" - ], - "properties": { - "endpoints": { - "description": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", - "type": "string" - }, - "path": { - "description": "Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", - "type": "string" - }, - "readOnly": { - "description": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", - "type": "boolean" - } - } + "finalizers": { + "description": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.", + "type": "array", + "items": { + "type": "string" + }, + "x-kubernetes-patch-strategy": "merge" }, - "hostPath": { - "description": "Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling.", + "generateName": { + "description": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will return a 409.\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency", + "type": "string" + }, + "generation": { + "description": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.", + "type": "integer", + "format": "int64" + }, + "labels": { + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels", "type": "object", - "required": [ - "path" - ], - "properties": { - "path": { - "description": "Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", - "type": "string" - }, - "type": { - "description": "Type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", - "type": "string" - } + "additionalProperties": { + "type": "string" } }, - "iscsi": { - "description": "Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.", - "type": "object", - "required": [ - "targetPortal", - "iqn", - "lun" - ], - "properties": { - "chapAuthDiscovery": { - "description": "whether support iSCSI Discovery CHAP authentication", - "type": "boolean" - }, - "chapAuthSession": { - "description": "whether support iSCSI Session CHAP authentication", - "type": "boolean" - }, - "fsType": { - "description": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi", - "type": "string" - }, - "initiatorName": { - "description": "Custom iSCSI Initiator Name. 
If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface \u003ctarget portal\u003e:\u003cvolume name\u003e will be created for the connection.", - "type": "string" - }, - "iqn": { - "description": "Target iSCSI Qualified Name.", - "type": "string" - }, - "iscsiInterface": { - "description": "iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).", - "type": "string" - }, - "lun": { - "description": "iSCSI Target Lun number.", - "type": "integer", - "format": "int32" - }, - "portals": { - "description": "iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", - "type": "array", - "items": { - "type": "string" - } - }, - "readOnly": { - "description": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", - "type": "boolean" - }, - "secretRef": { - "description": "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.", - "type": "object", - "properties": { - "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", - "type": "string" - } - } - }, - "targetPortal": { - "description": "iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", - "type": "string" - } + "managedFields": { + "description": "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry" } }, "name": { - "description": "Volume's name. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "description": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names", "type": "string" }, - "nfs": { - "description": "Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling.", - "type": "object", - "required": [ - "server", - "path" - ], - "properties": { - "path": { - "description": "Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", - "type": "string" - }, - "readOnly": { - "description": "ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", - "type": "boolean" - }, - "server": { - "description": "Server is the hostname or IP address of the NFS server. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", - "type": "string" - } - } + "namespace": { + "description": "Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces", + "type": "string" }, - "persistentVolumeClaim": { - "description": "PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).", - "type": "object", - "required": [ - "claimName" - ], - "properties": { - "claimName": { - "description": "ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", - "type": "string" - }, - "readOnly": { - "description": "Will force the ReadOnly setting in VolumeMounts. Default false.", - "type": "boolean" - } - } + "ownerReferences": { + "description": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference" + }, + "x-kubernetes-patch-merge-key": "uid", + "x-kubernetes-patch-strategy": "merge" }, - "photonPersistentDisk": { - "description": "Represents a Photon Controller persistent disk resource.", - "type": "object", - "required": [ - "pdID" - ], - "properties": { - "fsType": { - "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", - "type": "string" - }, - "pdID": { - "description": "ID that identifies Photon Controller persistent disk", - "type": "string" - } - } + "resourceVersion": { + "description": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", + "type": "string" }, - "portworxVolume": { - "description": "PortworxVolumeSource represents a Portworx volume resource.", - "type": "object", - "required": [ - "volumeID" - ], - "properties": { - "fsType": { - "description": "FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\".
Implicitly inferred to be \"ext4\" if unspecified.", - "type": "string" - }, - "readOnly": { - "description": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", - "type": "boolean" - }, - "volumeID": { - "description": "VolumeID uniquely identifies a Portworx volume", - "type": "string" - } - } + "selfLink": { + "description": "Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.", + "type": "string" }, - "projected": { - "description": "Represents a projected volume source", - "type": "object", - "required": [ - "sources" - ], - "properties": { - "defaultMode": { - "description": "Mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", - "type": "integer", - "format": "int32" - }, - "sources": { - "description": "list of volume projections", - "type": "array", - "items": { - "description": "Projection that may be projected along with other supported volume types", - "type": "object", - "properties": { - "configMap": { - "description": "Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.", - "type": "object", - "properties": { - "items": { - "description": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", - "type": "array", - "items": { - "description": "Maps a string key to a path within a volume.", - "type": "object", - "required": [ - "key", - "path" - ], - "properties": { - "key": { - "description": "The key to project.", - "type": "string" - }, - "mode": { - "description": "Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", - "type": "integer", - "format": "int32" - }, - "path": { - "description": "The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.", - "type": "string" - } - } - } - }, - "name": { - "description": "Name of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", - "type": "string" - }, - "optional": { - "description": "Specify whether the ConfigMap or its keys must be defined", - "type": "boolean" - } - } - }, - "downwardAPI": { - "description": "Represents downward API info for projecting into a projected volume. Note that this is identical to a downwardAPI volume source without the default mode.", - "type": "object", - "properties": { - "items": { - "description": "Items is a list of DownwardAPIVolume file", - "type": "array", - "items": { - "description": "DownwardAPIVolumeFile represents information to create the file containing the pod field", - "type": "object", - "required": [ - "path" - ], - "properties": { - "fieldRef": { - "description": "ObjectFieldSelector selects an APIVersioned field of an object.", - "type": "object", - "required": [ - "fieldPath" - ], - "properties": { - "apiVersion": { - "description": "Version of the schema the FieldPath is written in terms of, defaults to \"v1\".", - "type": "string" - }, - "fieldPath": { - "description": "Path of the field to select in the specified API version.", - "type": "string" - } - } - }, - "mode": { - "description": "Optional: mode bits used to set permissions on this file, must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", - "type": "integer", - "format": "int32" - }, - "path": { - "description": "Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'", - "type": "string" - }, - "resourceFieldRef": { - "description": "ResourceFieldSelector represents container resources (cpu, memory) and their output format", - "type": "object", - "required": [ - "resource" - ], - "properties": { - "containerName": { - "description": "Container name: required for volumes, optional for env vars", - "type": "string" - }, - "divisor": { - "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n (Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. 
| .\u003cdigits\u003e \u003csign\u003e ::= \"+\" | \"-\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n a. No precision is lost\n b. No fractional digits will be emitted\n c. The exponent (or suffix) is as large as possible.\nThe sign will be omitted unless the number is negative.\n\nExamples:\n 1.5 will be serialized as \"1500m\"\n 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", - "type": "string" - }, - "resource": { - "description": "Required: resource to select", - "type": "string" - } - } - } - } - } - } - } - }, - "secret": { - "description": "Adapts a secret into a projected volume.\n\nThe contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode.", - "type": "object", - "properties": { - "items": { - "description": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", - "type": "array", - "items": { - "description": "Maps a string key to a path within a volume.", - "type": "object", - "required": [ - "key", - "path" - ], - "properties": { - "key": { - "description": "The key to project.", - "type": "string" - }, - "mode": { - "description": "Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. 
YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", - "type": "integer", - "format": "int32" - }, - "path": { - "description": "The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.", - "type": "string" - } - } - } - }, - "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", - "type": "string" - }, - "optional": { - "description": "Specify whether the Secret or its key must be defined", - "type": "boolean" - } - } - }, - "serviceAccountToken": { - "description": "ServiceAccountTokenProjection represents a projected service account token volume. This projection can be used to insert a service account token into the pods runtime filesystem for use against APIs (Kubernetes API Server or otherwise).", - "type": "object", - "required": [ - "path" - ], - "properties": { - "audience": { - "description": "Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.", - "type": "string" - }, - "expirationSeconds": { - "description": "ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes.", - "type": "integer", - "format": "int64" - }, - "path": { - "description": "Path is the path relative to the mount point of the file to project the token into.", - "type": "string" - } - } - } - } - } - } - } + "uid": { + "description": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids", + "type": "string" + } + } + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference": { + "description": "OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.", + "type": "object", + "required": [ + "apiVersion", + "kind", + "name", + "uid" + ], + "properties": { + "apiVersion": { + "description": "API version of the referent.", + "type": "string" }, - "quobyte": { - "description": "Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling.", - "type": "object", - "required": [ - "registry", - "volume" - ], - "properties": { - "group": { - "description": "Group to map volume access to Default is no group", - "type": "string" - }, - "readOnly": { - "description": "ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. 
Defaults to false.", - "type": "boolean" - }, - "registry": { - "description": "Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes", - "type": "string" - }, - "tenant": { - "description": "Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin", - "type": "string" - }, - "user": { - "description": "User to map volume access to Defaults to serivceaccount user", - "type": "string" - }, - "volume": { - "description": "Volume is a string that references an already created Quobyte volume by name.", - "type": "string" - } - } + "blockOwnerDeletion": { + "description": "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.", + "type": "boolean" + }, + "controller": { + "description": "If true, this reference points to the managing controller.", + "type": "boolean" + }, + "kind": { + "description": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "name": { + "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names", + "type": "string" + }, + "uid": { + "description": "UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids", + "type": "string" + } + }, + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.Patch": { + "description": "Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.", + "type": "object" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.Preconditions": { + "description": "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.", + "type": "object", + "properties": { + "resourceVersion": { + "description": "Specifies the target ResourceVersion", + "type": "string" + }, + "uid": { + "description": "Specifies the target UID.", + "type": "string" + } + } + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR": { + "description": "ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match.", + "type": "object", + "required": [ + "clientCIDR", + "serverAddress" + ], + "properties": { + "clientCIDR": { + "description": "The CIDR with which clients can match their IP to figure out the server address that they should use.", + "type": "string" + }, + "serverAddress": { + "description": "Address of this server, suitable for a client that matches the above CIDR. 
This can be a hostname, hostname:port, IP or IP:port.", + "type": "string" + } + } + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.Status": { + "description": "Status is a return value for calls that don't return other objects.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "code": { + "description": "Suggested HTTP return code for this status, 0 if not set.", + "type": "integer", + "format": "int32" + }, + "details": { + "description": "Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.StatusDetails" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "message": { + "description": "A human-readable description of the status of this operation.", + "type": "string" + }, + "metadata": { + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + }, + "reason": { + "description": "A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.", + "type": "string" + }, + "status": { + "description": "Status of the operation. One of: \"Success\" or \"Failure\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "type": "string" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "Status", + "version": "v1" + }, + { + "group": "resource.k8s.io", + "kind": "Status", + "version": "v1alpha2" + } + ] + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.StatusCause": { + "description": "StatusCause provides more information about an api.Status failure, including cases when multiple errors are encountered.", + "type": "object", + "properties": { + "field": { + "description": "The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.\n\nExamples:\n \"name\" - the field \"name\" on the current resource\n \"items[0].name\" - the field \"name\" on the first array entry in \"items\"", + "type": "string" }, - "rbd": { - "description": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.", - "type": "object", - "required": [ - "monitors", - "image" - ], - "properties": { - "fsType": { - "description": "Filesystem type of the volume that you want to mount. 
Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd", - "type": "string" - }, - "image": { - "description": "The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", - "type": "string" - }, - "keyring": { - "description": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", - "type": "string" - }, - "monitors": { - "description": "A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", - "type": "array", - "items": { - "type": "string" - } - }, - "pool": { - "description": "The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", - "type": "string" - }, - "readOnly": { - "description": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", - "type": "boolean" - }, - "secretRef": { - "description": "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.", - "type": "object", - "properties": { - "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", - "type": "string" - } - } - }, - "user": { - "description": "The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", - "type": "string" - } - } + "message": { + "description": "A human-readable description of the cause of the error. This field may be presented as-is to a reader.", + "type": "string" }, - "scaleIO": { - "description": "ScaleIOVolumeSource represents a persistent ScaleIO volume", - "type": "object", - "required": [ - "gateway", - "system", - "secretRef" - ], - "properties": { - "fsType": { - "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\".", - "type": "string" - }, - "gateway": { - "description": "The host address of the ScaleIO API Gateway.", - "type": "string" - }, - "protectionDomain": { - "description": "The name of the ScaleIO Protection Domain for the configured storage.", - "type": "string" - }, - "readOnly": { - "description": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", - "type": "boolean" - }, - "secretRef": { - "description": "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.", - "type": "object", - "properties": { - "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", - "type": "string" - } - } - }, - "sslEnabled": { - "description": "Flag to enable/disable SSL communication with Gateway, default false", - "type": "boolean" - }, - "storageMode": { - "description": "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. 
Default is ThinProvisioned.", - "type": "string" - }, - "storagePool": { - "description": "The ScaleIO Storage Pool associated with the protection domain.", - "type": "string" - }, - "system": { - "description": "The name of the storage system as configured in ScaleIO.", - "type": "string" - }, - "volumeName": { - "description": "The name of a volume already created in the ScaleIO system that is associated with this volume source.", - "type": "string" - } + "reason": { + "description": "A machine-readable description of the cause of the error. If this value is empty there is no information available.", + "type": "string" + } + } + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.StatusDetails": { + "description": "StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.", + "type": "object", + "properties": { + "causes": { + "description": "The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.", + "type": "array", + "items": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.StatusCause" } }, - "secret": { - "description": "Adapts a Secret into a volume.\n\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.", - "type": "object", - "properties": { - "defaultMode": { - "description": "Optional: mode bits used to set permissions on created files by default. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", - "type": "integer", - "format": "int32" - }, - "items": { - "description": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", - "type": "array", - "items": { - "description": "Maps a string key to a path within a volume.", - "type": "object", - "required": [ - "key", - "path" - ], - "properties": { - "key": { - "description": "The key to project.", - "type": "string" - }, - "mode": { - "description": "Optional: mode bits used to set permissions on this file. Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. If not specified, the volume defaultMode will be used. 
This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", - "type": "integer", - "format": "int32" - }, - "path": { - "description": "The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.", - "type": "string" - } - } - } - }, - "optional": { - "description": "Specify whether the Secret or its keys must be defined", - "type": "boolean" - }, - "secretName": { - "description": "Name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", - "type": "string" - } - } + "group": { + "description": "The group attribute of the resource associated with the status StatusReason.", + "type": "string" }, - "storageos": { - "description": "Represents a StorageOS persistent volume resource.", - "type": "object", - "properties": { - "fsType": { - "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", - "type": "string" - }, - "readOnly": { - "description": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", - "type": "boolean" - }, - "secretRef": { - "description": "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.", - "type": "object", - "properties": { - "name": { - "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", - "type": "string" - } - } - }, - "volumeName": { - "description": "VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.", - "type": "string" - }, - "volumeNamespace": { - "description": "VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.", - "type": "string" - } - } + "kind": { + "description": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" }, - "vsphereVolume": { - "description": "Represents a vSphere volume resource.", - "type": "object", - "required": [ - "volumePath" - ], - "properties": { - "fsType": { - "description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". 
Implicitly inferred to be \"ext4\" if unspecified.", - "type": "string" - }, - "storagePolicyID": { - "description": "Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.", - "type": "string" - }, - "storagePolicyName": { - "description": "Storage Policy Based Management (SPBM) profile name.", - "type": "string" - }, - "volumePath": { - "description": "Path that identifies vSphere volume vmdk", - "type": "string" - } - } + "name": { + "description": "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).", + "type": "string" + }, + "retryAfterSeconds": { + "description": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.", + "type": "integer", + "format": "int32" + }, + "uid": { + "description": "UID of the resource. (when there is a single resource which can be described). More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids", + "type": "string" } } }, - "io.k8s.apimachinery.pkg.api.resource.Quantity": { - "description": "Quantity is a fixed-point representation of a number. It provides convenient marshaling/unmarshaling in JSON and YAML, in addition to String() and AsInt64() accessors.\n\nThe serialization format is:\n\n\u003cquantity\u003e ::= \u003csignedNumber\u003e\u003csuffix\u003e\n (Note that \u003csuffix\u003e may be empty, from the \"\" case in \u003cdecimalSI\u003e.)\n\u003cdigit\u003e ::= 0 | 1 | ... | 9 \u003cdigits\u003e ::= \u003cdigit\u003e | \u003cdigit\u003e\u003cdigits\u003e \u003cnumber\u003e ::= \u003cdigits\u003e | \u003cdigits\u003e.\u003cdigits\u003e | \u003cdigits\u003e. | .\u003cdigits\u003e \u003csign\u003e ::= \"+\" | \"-\" \u003csignedNumber\u003e ::= \u003cnumber\u003e | \u003csign\u003e\u003cnumber\u003e \u003csuffix\u003e ::= \u003cbinarySI\u003e | \u003cdecimalExponent\u003e | \u003cdecimalSI\u003e \u003cbinarySI\u003e ::= Ki | Mi | Gi | Ti | Pi | Ei\n (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)\n\u003cdecimalSI\u003e ::= m | \"\" | k | M | G | T | P | E\n (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)\n\u003cdecimalExponent\u003e ::= \"e\" \u003csignedNumber\u003e | \"E\" \u003csignedNumber\u003e\n\nNo matter which of the three exponent forms is used, no quantity may represent a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal places. Numbers larger or more precise will be capped or rounded up. (E.g.: 0.1m will rounded up to 1m.) This may be extended in the future if we require larger or smaller quantities.\n\nWhen a Quantity is parsed from a string, it will remember the type of suffix it had, and will use the same type again when it is serialized.\n\nBefore serializing, Quantity will be put in \"canonical form\". This means that Exponent/suffix will be adjusted up or down (with a corresponding increase or decrease in Mantissa) such that:\n a. No precision is lost\n b. No fractional digits will be emitted\n c. The exponent (or suffix) is as large as possible.\nThe sign will be omitted unless the number is negative.\n\nExamples:\n 1.5 will be serialized as \"1500m\"\n 1.5Gi will be serialized as \"1536Mi\"\n\nNote that the quantity will NEVER be internally represented by a floating point number. 
That is the whole point of this exercise.\n\nNon-canonical values will still parse as long as they are well formed, but will be re-emitted in their canonical form. (So always use canonical form, or don't diff.)\n\nThis format is intended to make it difficult to use these numbers without writing some sort of special handling code in the hopes that that will cause implementors to also use a fixed point implementation.", - "type": "string" + "io.k8s.apimachinery.pkg.apis.meta.v1.Time": { + "description": "Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.", + "type": "string", + "format": "date-time" }, - "io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta": { - "description": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.", + "io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent": { + "description": "Event represents a single event to a watched resource.", "type": "object", + "required": [ + "type", + "object" + ], "properties": { - "continue": { - "description": "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.", + "object": { + "description": "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.runtime.RawExtension" + }, + "type": { + "type": "string" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "admission.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "admission.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "admissionregistration.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "admissionregistration.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, + { + "group": "admissionregistration.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "apiextensions.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "apiextensions.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "apiregistration.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "apiregistration.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "apps", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "apps", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "apps", + "kind": "WatchEvent", + "version": "v1beta2" + }, + { + "group": "authentication.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "authentication.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, + { + "group": 
"authentication.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "authorization.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "authorization.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "autoscaling", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "autoscaling", + "kind": "WatchEvent", + "version": "v2" + }, + { + "group": "autoscaling", + "kind": "WatchEvent", + "version": "v2beta1" + }, + { + "group": "autoscaling", + "kind": "WatchEvent", + "version": "v2beta2" + }, + { + "group": "batch", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "batch", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "certificates.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "certificates.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, + { + "group": "certificates.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "coordination.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "coordination.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "discovery.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "discovery.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "events.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "events.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "extensions", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "WatchEvent", + "version": "v1beta2" + }, + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "WatchEvent", + "version": "v1beta3" + }, + { + "group": "imagepolicy.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, + { + "group": "internal.apiserver.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, + { + "group": "networking.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "networking.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, + { + "group": "networking.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "node.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "node.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, + { + "group": "node.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "policy", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "policy", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "rbac.authorization.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "rbac.authorization.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, + { + "group": "rbac.authorization.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "resource.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha2" + }, + { + "group": "scheduling.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "scheduling.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, + { + "group": "scheduling.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "storage.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "storage.k8s.io", + 
"kind": "WatchEvent", + "version": "v1alpha1" + }, + { + "group": "storage.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + } + ] + }, + "io.k8s.apimachinery.pkg.runtime.RawExtension": { + "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", + "type": "object" + }, + "io.k8s.apimachinery.pkg.util.intstr.IntOrString": { + "description": "IntOrString is a type that can hold an int32 or a string. When used in JSON or YAML marshalling and unmarshalling, it produces or consumes the inner type. This allows you to have, for example, a JSON field that can accept a name or number.", + "type": "string", + "format": "int-or-string" + }, + "io.k8s.apimachinery.pkg.version.Info": { + "description": "Info contains versioning information. how we'll want to distribute that information.", + "type": "object", + "required": [ + "major", + "minor", + "gitVersion", + "gitCommit", + "gitTreeState", + "buildDate", + "goVersion", + "compiler", + "platform" + ], + "properties": { + "buildDate": { "type": "string" }, - "remainingItemCount": { - "description": "remainingItemCount is the number of subsequent items in the list which are not included in this list response. If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. Servers older than v1.15 do not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection. Clients should not rely on the remainingItemCount to be set or to be exact.", - "type": "integer", - "format": "int64" + "compiler": { + "type": "string" }, - "resourceVersion": { - "description": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. 
Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", + "gitCommit": { "type": "string" }, - "selfLink": { - "description": "selfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.", + "gitTreeState": { + "type": "string" + }, + "gitVersion": { + "type": "string" + }, + "goVersion": { + "type": "string" + }, + "major": { + "type": "string" + }, + "minor": { + "type": "string" + }, + "platform": { "type": "string" } } }, - "io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta": { - "description": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.", + "io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIService": { + "description": "APIService represents a server for a particular GroupVersion. Name must be \"version.group\".", "type": "object", "properties": { - "annotations": { - "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", - "type": "object", - "additionalProperties": { - "type": "string" - } + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" }, - "clusterName": { - "description": "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.", + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "type": "string" }, - "creationTimestamp": { - "description": "Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.", - "type": "string", - "format": "date-time" + "metadata": { + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" }, - "deletionGracePeriodSeconds": { - "description": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. 
Read-only.", - "type": "integer", - "format": "int64" + "spec": { + "description": "Spec contains information for locating and communicating with a server", + "$ref": "#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceSpec" }, - "deletionTimestamp": { - "description": "Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.", - "type": "string", - "format": "date-time" + "status": { + "description": "Status contains derived information about an API server", + "$ref": "#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceStatus" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "apiregistration.k8s.io", + "kind": "APIService", + "version": "v1" + } + ] + }, + "io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceCondition": { + "description": "APIServiceCondition describes the state of an APIService at a particular point", + "type": "object", + "required": [ + "type", + "status" + ], + "properties": { + "lastTransitionTime": { + "description": "Last time the condition transitioned from one status to another.", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Time" }, - "finalizers": { - "description": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.", - "type": "array", - "items": { - "type": "string" - }, - "x-kubernetes-patch-strategy": "merge" + "message": { + "description": "Human-readable message indicating details about last transition.", + "type": "string" }, - "generateName": { - "description": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency", + "reason": { + "description": "Unique, one-word, CamelCase reason for the condition's last transition.", "type": "string" }, - "generation": { - "description": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.", - "type": "integer", - "format": "int64" + "status": { + "description": "Status is the status of the condition. Can be True, False, Unknown.", + "type": "string" }, - "labels": { - "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", - "type": "object", - "additionalProperties": { - "type": "string" - } + "type": { + "description": "Type is the type of the condition.", + "type": "string" + } + } + }, + "io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceList": { + "description": "APIServiceList is a list of APIService objects.", + "type": "object", + "required": [ + "items" + ], + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" }, - "managedFields": { - "description": "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.", + "items": { + "description": "Items is the list of APIService", "type": "array", "items": { - "description": "ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to.", - "type": "object", - "properties": { - "apiVersion": { - "description": "APIVersion defines the version of this resource that this field set applies to. The format is \"group/version\" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.", - "type": "string" - }, - "fieldsType": { - "description": "FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: \"FieldsV1\"", - "type": "string" - }, - "fieldsV1": { - "description": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. 
The string will follow one of these four formats: 'f:\u003cname\u003e', where \u003cname\u003e is the name of a field in a struct, or key in a map 'v:\u003cvalue\u003e', where \u003cvalue\u003e is the exact json formatted value of a list item 'i:\u003cindex\u003e', where \u003cindex\u003e is position of a item in a list 'k:\u003ckeys\u003e', where \u003ckeys\u003e is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff", - "type": "object" - }, - "manager": { - "description": "Manager is an identifier of the workflow managing these fields.", - "type": "string" - }, - "operation": { - "description": "Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.", - "type": "string" - }, - "time": { - "description": "Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.", - "type": "string", - "format": "date-time" - } - } + "$ref": "#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIService" } }, - "name": { - "description": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", "type": "string" }, - "namespace": { - "description": "Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", + "metadata": { + "description": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "apiregistration.k8s.io", + "kind": "APIServiceList", + "version": "v1" + } + ] + }, + "io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceSpec": { + "description": "APIServiceSpec contains information for locating and communicating with a server. Only https is supported, though you are able to disable certificate verification.", + "type": "object", + "required": [ + "groupPriorityMinimum", + "versionPriority" + ], + "properties": { + "caBundle": { + "description": "CABundle is a PEM encoded CA bundle which will be used to validate an API server's serving certificate. 
If unspecified, system trust roots on the apiserver are used.", + "type": "string", + "format": "byte", + "x-kubernetes-list-type": "atomic" + }, + "group": { + "description": "Group is the API group name this server hosts", "type": "string" }, - "ownerReferences": { - "description": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", + "groupPriorityMinimum": { + "description": "GroupPriorityMinimum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMinimum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo) We'd recommend something like: *.k8s.io (except extensions) at 18000 and PaaSes (OpenShift, Deis) are recommended to be in the 2000s", + "type": "integer", + "format": "int32" + }, + "insecureSkipTLSVerify": { + "description": "InsecureSkipTLSVerify disables TLS certificate verification when communicating with this server. This is strongly discouraged. You should use the CABundle instead.", + "type": "boolean" + }, + "service": { + "description": "Service is a reference to the service for this API server. It must communicate on port 443. If the Service is nil, that means the handling for the API groupversion is handled locally on this server. The call will simply delegate to the normal handler chain to be fulfilled.", + "$ref": "#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.ServiceReference" + }, + "version": { + "description": "Version is the API version this server hosts. For example, \"v1\"", + "type": "string" + }, + "versionPriority": { + "description": "VersionPriority controls the ordering of this API version inside of its group. Must be greater than zero. The primary sort is based on VersionPriority, ordered highest to lowest (20 before 10). Since it's inside of a group, the number can be small, probably in the 10s. In case of equal version priorities, the version string will be used to compute the order inside a group. If the version string is \"kube-like\", it will sort above non \"kube-like\" version strings, which are ordered lexicographically. \"Kube-like\" versions start with a \"v\", then are followed by a number (the major version), then optionally the string \"alpha\" or \"beta\" and another number (the minor version). These are sorted first by GA \u003e beta \u003e alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major version, then minor version. 
An example sorted list of versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.", + "type": "integer", + "format": "int32" + } + } + }, + "io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceStatus": { + "description": "APIServiceStatus contains derived information about an API server", + "type": "object", + "properties": { + "conditions": { + "description": "Current service state of apiService.", "type": "array", "items": { - "description": "OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.", - "type": "object", - "required": [ - "apiVersion", - "kind", - "name", - "uid" - ], - "properties": { - "apiVersion": { - "description": "API version of the referent.", - "type": "string" - }, - "blockOwnerDeletion": { - "description": "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.", - "type": "boolean" - }, - "controller": { - "description": "If true, this reference points to the managing controller.", - "type": "boolean" - }, - "kind": { - "description": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "type": "string" - }, - "name": { - "description": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "type": "string" - }, - "uid": { - "description": "UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", - "type": "string" - } - } + "$ref": "#/definitions/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceCondition" }, - "x-kubernetes-patch-merge-key": "uid", + "x-kubernetes-list-map-keys": [ + "type" + ], + "x-kubernetes-list-type": "map", + "x-kubernetes-patch-merge-key": "type", "x-kubernetes-patch-strategy": "merge" - }, - "resourceVersion": { - "description": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", + } + } + }, + "io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.ServiceReference": { + "description": "ServiceReference holds a reference to Service.legacy.k8s.io", + "type": "object", + "properties": { + "name": { + "description": "Name is the name of the service", "type": "string" }, - "selfLink": { - "description": "SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.", + "namespace": { + "description": "Namespace is the namespace of the service", "type": "string" }, - "uid": { - "description": "UID is the unique in time and space value for this object. 
It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", - "type": "string" + "port": { + "description": "If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive).", + "type": "integer", + "format": "int32" } } - }, - "io.k8s.apimachinery.pkg.apis.meta.v1.Time": { - "description": "Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.", - "type": "string", - "format": "date-time" } } } \ No newline at end of file diff --git a/api/sensor.html b/api/sensor.html index e67fbeaacc..0b152aee7b 100644 --- a/api/sensor.html +++ b/api/sensor.html @@ -42,26 +42,28 @@

AWSLambdaTrigger

@@ -125,6 +127,18 @@

AWSLambdaTrigger

+ + + +
+Field + +Description +
+WebhookContext
+ WebhookContext +

-DeprecatedServerCertPath refers the file that contains the cert. +(Members of WebhookContext are embedded into this type.)

-serverKeyPath
string +filter
+ EventSourceFilter +
+(Optional)

-DeprecatedServerKeyPath refers the file that contains private key +Filter

accessKey
- + Kubernetes core/v1.SecretKeySelector
-

AccessKey refers K8 secret containing aws access key

+(Optional) +

AccessKey refers K8s secret containing aws access key

secretKey
- + Kubernetes core/v1.SecretKeySelector
-

SecretKey refers K8 secret containing aws secret key

+(Optional) +

SecretKey refers K8s secret containing aws secret key

+roleARN
+ +string + +
+(Optional) +

RoleARN is the Amazon Resource Name (ARN) of the role to assume.

+

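For reference, a minimal Sensor snippet using the AWSLambdaTrigger fields above might look like the following sketch (the template key awsLambda, the functionName and region fields, and the secret names are assumptions, not taken from this hunk):

    triggers:
      - template:
          name: lambda-trigger
          awsLambda:
            functionName: my-function        # hypothetical Lambda function
            region: us-east-1
            roleARN: arn:aws:iam::123456789012:role/sensor-role  # optional role to assume
            accessKey:                       # K8s secret selector, per the table above
              name: aws-secret               # hypothetical secret
              key: accesskey
            secretKey:
              name: aws-secret
              key: secretkey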
ArgoWorkflowOperation @@ -163,7 +177,7 @@

ArgoWorkflowTrigger -

Source of the K8 resource file(s)

+

Source of the K8s resource file(s)

@@ -196,18 +210,13 @@

ArgoWorkflowTrigger -GroupVersionResource
+args
- -Kubernetes meta/v1.GroupVersionResource - +[]string -

-(Members of GroupVersionResource are embedded into this type.) -

-

The unambiguous kind of this object - used in order to retrieve the appropriate kubernetes api client for this resource

+

Args is the list of arguments to pass to the argo CLI

@@ -282,7 +291,7 @@

ArtifactLocation configmap
- + Kubernetes core/v1.ConfigMapKeySelector @@ -360,7 +369,7 @@

AzureEventHubsTrigger sharedAccessKeyName
- + Kubernetes core/v1.SecretKeySelector @@ -373,7 +382,7 @@

AzureEventHubsTrigger sharedAccessKey
- + Kubernetes core/v1.SecretKeySelector @@ -412,6 +421,110 @@

AzureEventHubsTrigger +

AzureServiceBusTrigger +

+

+(Appears on: +TriggerTemplate) +

+

+

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+connectionString
+ + +Kubernetes core/v1.SecretKeySelector + + +
+

ConnectionString is the connection string for the Azure Service Bus

+
+queueName
+ +string + +
+

QueueName is the name of the Azure Service Bus Queue

+
+topicName
+ +string + +
+

TopicName is the name of the Azure Service Bus Topic

+
+subscriptionName
+ +string + +
+

SubscriptionName is the name of the Azure Service Bus Topic Subscription

+
+tls
+ +github.com/argoproj/argo-events/pkg/apis/common.TLSConfig + +
+(Optional) +

TLS configuration for the service bus client

+
+payload
+ + +[]TriggerParameter + + +
+

Payload is the list of key-value extracted from an event payload to construct the request payload.

+
+parameters
+ + +[]TriggerParameter + + +
+(Optional) +

Parameters is the list of key-value extracted from event’s payload that are applied to +the trigger resource.

+

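For reference, a minimal sketch of the AzureServiceBusTrigger fields above in a Sensor (the template key azureServiceBus and the secret name are assumptions):

    triggers:
      - template:
          name: servicebus-trigger
          azureServiceBus:
            connectionString:
              name: servicebus-secret        # hypothetical secret
              key: connectionString
            queueName: my-queue              # or topicName plus subscriptionName
            payload:
              - src:
                  dependencyName: my-dep
                  dataKey: body
                dest: message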
Comparator (string alias)

@@ -421,6 +534,77 @@

Comparator

Comparator refers to the comparator operator for a data filter

+

ConditionsResetByTime +

+

+(Appears on: +ConditionsResetCriteria) +

+

+

+ + + + + + + + + + + + + + + + + +
FieldDescription
+cron
+ +string + +
+

Cron is a cron-like expression. For reference, see: https://en.wikipedia.org/wiki/Cron

+
+timezone
+ +string + +
+(Optional) +
+

ConditionsResetCriteria +

+

+(Appears on: +TriggerTemplate) +

+

+

+ + + + + + + + + + + + + +
FieldDescription
+byTime
+ + +ConditionsResetByTime + + +
+

Schedule is a cron-like expression. For reference, see: https://en.wikipedia.org/wiki/Cron

+

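For reference, a minimal sketch of a conditions reset on a trigger template (the conditions field and the list form of conditionsReset are assumptions based on "Appears on: TriggerTemplate"):

    triggers:
      - template:
          name: my-trigger
          conditions: "dep-a && dep-b"
          conditionsReset:
            - byTime:
                cron: "0 0 * * *"            # clear collected conditions at midnight
                timezone: America/New_York   # optional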
CustomTrigger

@@ -464,7 +648,7 @@

CustomTrigger certSecret
- + Kubernetes core/v1.SecretKeySelector @@ -525,18 +709,6 @@

CustomTrigger

Payload is the list of key-value extracted from an event payload to construct the request payload.

- - -certFilePath
- -string - - - -

DeprecatedCertFilePath is path to the cert file within sensor for secure connection between sensor and custom trigger gRPC server. -Deprecated: will be removed in v1.5, use CertSecret instead

- -

DataFilter @@ -631,14 +803,14 @@

DataFilter -

DependencyGroup +

EmailTrigger

(Appears on: -SensorSpec) +TriggerTemplate)

-

DependencyGroup is the group of dependencies

+

EmailTrigger refers to the specification of the email notification trigger.

@@ -650,24 +822,115 @@

DependencyGroup

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -797,7 +1060,7 @@

EventContext

+ + + + + + + +
-name
+parameters
+ + +[]TriggerParameter + + +
+(Optional) +

Parameters is the list of key-value extracted from event’s payload that are applied to +the trigger resource.

+
+username
string
-

Name of the group

+(Optional) +

Username refers to the username used to connect to the SMTP server.

-dependencies
+smtpPassword
+ + +Kubernetes core/v1.SecretKeySelector + + +
+(Optional) +

SMTPPassword refers to the Kubernetes secret that holds the SMTP password used to connect to the SMTP server.

+
+host
+ +string + +
+

Host refers to the SMTP host URL to which the email is sent.

+
+port
+ +int32 + +
+(Optional) +

Port refers to the SMTP server port to which the email is sent. +Defaults to 0.

+
+to
[]string
-

Dependencies of events

+(Optional) +

To refers to the email addresses to which the emails are sent.

+
+from
+ +string + +
+(Optional) +

From refers to the address from which the email is sent.

+
+subject
+ +string + +
+(Optional) +

Subject refers to the subject line of the email to be sent.

+
+body
+ +string + +
+(Optional) +

Body refers to the body/content of the email to be sent.

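For reference, a minimal sketch of the EmailTrigger fields above (the template key email and the secret name are assumptions):

    triggers:
      - template:
          name: email-trigger
          email:
            host: smtp.example.com
            port: 587
            username: sender@example.com
            smtpPassword:
              name: smtp-secret              # hypothetical secret
              key: password
            from: sender@example.com
            to:
              - receiver@example.com
            subject: Event received
            body: An event arrived.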
time
- + Kubernetes meta/v1.Time @@ -871,6 +1134,34 @@

EventDependency

Filters and rules governing toleration of success and constraints on the context and data of an event

+transform
+ + +EventDependencyTransformer + + +
+

Transform transforms the event data

+
+filtersLogicalOperator
+ + +LogicalOperator + + +
+

FiltersLogicalOperator defines how different filters are evaluated together. +Available values: and (&&), or (||) +Is optional and if left blank treated as and (&&).

+

EventDependencyFilter @@ -918,28 +1209,112 @@

EventDependencyFilter -data
+data
+ + +[]DataFilter + + + + +

Data filter constraints with escalation

+ + + + +exprs
+ + +[]ExprFilter + + + + +

Exprs contains the list of expressions evaluated against the event payload.

+ + + + +dataLogicalOperator
+ + +LogicalOperator + + + + +

DataLogicalOperator defines how multiple Data filters (if defined) are evaluated together. +Available values: and (&&), or (||) +Is optional and if left blank treated as and (&&).

+ + + + +exprLogicalOperator
+ + +LogicalOperator + + + + +

ExprLogicalOperator defines how multiple Exprs filters (if defined) are evaluated together. +Available values: and (&&), or (||) +Is optional and if left blank treated as and (&&).

+ + + + +script
+ +string + + + +

Script refers to a Lua script evaluated to determine the validity of an event.

+ + + + +

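For reference, a minimal sketch of a dependency combining data and expr filters with the logical operators described above (the expr filter field layout is an assumption):

    dependencies:
      - name: my-dep
        eventSourceName: webhook
        eventName: example
        filtersLogicalOperator: or           # defaults to and
        filters:
          dataLogicalOperator: or
          data:
            - path: body.action
              type: string
              value:
                - opened
          exprs:
            - expr: count > 1
              fields:
                - name: count
                  path: body.count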
EventDependencyTransformer +

+

+(Appears on: +EventDependency) +

+

+

EventDependencyTransformer transforms the event

+

+ + + + + + + + + + @@ -1073,7 +1448,7 @@

GitArtifact

@@ -1182,7 +1555,7 @@

GitCreds

@@ -1575,8 +1949,7 @@

KafkaTrigger

@@ -1603,6 +1976,18 @@

KafkaTrigger

SASL configuration for the kafka client

+ + + +
FieldDescription
+jq
- -[]DataFilter - +string
-

Data filter constraints with escalation

+(Optional) +

JQ holds the jq command applied for transformation

-exprs
+script
- -[]ExprFilter - +string
-

Exprs contains the list of expressions evaluated against the event payload.

+(Optional) +

Script refers to a Lua script used to transform the event

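For reference, a minimal sketch of an event transform on a dependency (jq shown; script would carry a Lua script instead):

    dependencies:
      - name: my-dep
        eventSourceName: webhook
        eventName: example
        transform:
          jq: .body.message                  # replaces the event data with the jq output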
sshKeySecret
- + Kubernetes core/v1.SecretKeySelector @@ -1147,16 +1522,14 @@

GitArtifact

-sshKeyPath
+insecureIgnoreHostKey
-string +bool
(Optional) -

DeprecatedSSHKeyPath is path to your ssh key path. Use this if you don’t want to provide username and password. -ssh key path must be mounted in sensor pod. -Deprecated: will be removed in v1.5, use SSHKeySecret instead.

+

Whether to ignore host key

username
- + Kubernetes core/v1.SecretKeySelector @@ -1194,7 +1567,7 @@

GitCreds

password
- + Kubernetes core/v1.SecretKeySelector @@ -1487,7 +1860,8 @@

KafkaTrigger

-

Partition to write data to.

+(Optional) +

DEPRECATED

-

The partitioning key for the messages put on the Kafka topic. -Defaults to broker url.

+

The partitioning key for the messages put on the Kafka topic.

+schemaRegistry
+ +github.com/argoproj/argo-events/pkg/apis/common.SchemaRegistryConfig + +
+(Optional) +

Schema Registry configuration to producer message with avro format

+

KubernetesResourceOperation @@ -1644,6 +2029,15 @@

LogTrigger +

LogicalOperator +(string alias)

+

+(Appears on: +EventDependency, +EventDependencyFilter) +

+

+

NATSTrigger

@@ -1778,7 +2172,7 @@

OpenWhiskTrigger authToken
- + Kubernetes core/v1.SecretKeySelector @@ -1873,6 +2267,229 @@

PayloadField +

PulsarTrigger +

+

+(Appears on: +TriggerTemplate) +

+

+

PulsarTrigger refers to the specification of the Pulsar trigger.

+

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
FieldDescription
+url
+ +string + +
+

Configure the service URL for the Pulsar service.

+
+topic
+ +string + +
+

Name of the topic. +See https://pulsar.apache.org/docs/en/concepts-messaging/

+
+parameters
+ + +[]TriggerParameter + + +
+

Parameters is the list of parameters that is applied to the resolved Pulsar trigger object.

+
+payload
+ + +[]TriggerParameter + + +
+

Payload is the list of key-value extracted from an event payload to construct the request payload.

+
+tlsTrustCertsSecret
+ + +Kubernetes core/v1.SecretKeySelector + + +
+(Optional) +

Trusted TLS certificate secret.

+
+tlsAllowInsecureConnection
+ +bool + +
+(Optional) +

Whether the Pulsar client accepts an untrusted TLS certificate from the broker.

+
+tlsValidateHostname
+ +bool + +
+(Optional) +

Whether the Pulsar client verifies the validity of the host name from the broker.

+
+tls
+ +github.com/argoproj/argo-events/pkg/apis/common.TLSConfig + +
+(Optional) +

TLS configuration for the pulsar client.

+
+authTokenSecret
+ + +Kubernetes core/v1.SecretKeySelector + + +
+(Optional) +

Authentication token for the pulsar client. +Either token or athenz can be set to use auth.

+
+connectionBackoff
+ +github.com/argoproj/argo-events/pkg/apis/common.Backoff + +
+(Optional) +

Backoff holds parameters applied to connection.

+
+authAthenzParams
+ +map[string]string + +
+(Optional) +

Authentication athenz parameters for the pulsar client. +Refer https://github.com/apache/pulsar-client-go/blob/master/pulsar/auth/athenz.go +Either token or athenz can be set to use auth.

+
+authAthenzSecret
+ + +Kubernetes core/v1.SecretKeySelector + + +
+(Optional) +

Authentication athenz privateKey secret for the pulsar client. +AuthAthenzSecret must be set if AuthAthenzParams is used.

+
+

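For reference, a minimal sketch of the PulsarTrigger fields above (the template key pulsar and the secret name are assumptions):

    triggers:
      - template:
          name: pulsar-trigger
          pulsar:
            url: pulsar://pulsar-broker:6650
            topic: my-topic
            authTokenSecret:                 # optional, either token or athenz auth
              name: pulsar-secret            # hypothetical secret
              key: token
            payload:
              - src:
                  dependencyName: my-dep
                  dataKey: body
                dest: body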
RateLimit +

+

+(Appears on: +Trigger) +

+

+

+ + + + + + + + + + + + + + + + + +
FieldDescription
+unit
+ + +RateLimiteUnit + + +
+

Defaults to Second

+
+requestsPerUnit
+ +int32 + +
+
+

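For reference, a minimal sketch of a rate-limited trigger; per "Appears on: Trigger", rateLimit sits on the Trigger next to the template (the log trigger here is just a placeholder):

    triggers:
      - template:
          name: my-trigger
          log: {}
        rateLimit:
          unit: Second                       # defaults to Second
          requestsPerUnit: 20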
RateLimiteUnit +(string alias)

+

+(Appears on: +RateLimit) +

+

+

Sensor

@@ -1890,7 +2507,7 @@

Sensor metadata
- + Kubernetes meta/v1.ObjectMeta @@ -1955,19 +2572,6 @@

Sensor -dependencyGroups
- - -[]DependencyGroup - - - - -

DependencyGroups is a list of the groups of events.

- - - - errorOnFailedRound
bool @@ -1991,25 +2595,37 @@

Sensor -circuit
+replicas
-string +int32 -

Circuit is a boolean expression of dependency groups -Deprecated: will be removed in v1.5, use Switch in triggers instead.

+

Replicas is the sensor deployment replicas

-replicas
+revisionHistoryLimit
int32 -

Replicas is the sensor deployment replicas

+(Optional) +

RevisionHistoryLimit specifies how many old deployment revisions to retain

+ + + + +loggingFields
+ +map[string]string + + + +(Optional) +

LoggingFields add additional key-value pairs when logging happens

@@ -2089,19 +2705,6 @@

SensorSpec -dependencyGroups
- - -[]DependencyGroup - - - - -

DependencyGroups is a list of the groups of events.

- - - - errorOnFailedRound
bool @@ -2125,25 +2728,37 @@

SensorSpec -circuit
+replicas
-string +int32 -

Circuit is a boolean expression of dependency groups -Deprecated: will be removed in v1.5, use Switch in triggers instead.

+

Replicas is the sensor deployment replicas

-replicas
+revisionHistoryLimit
int32 -

Replicas is the sensor deployment replicas

+(Optional) +

RevisionHistoryLimit specifies how many old deployment revisions to retain

+ + + + +loggingFields
+ +map[string]string + + + +(Optional) +

LoggingFields add additional key-value pairs when logging happens

@@ -2180,6 +2795,90 @@

SensorStatus +

SlackSender +

+

+(Appears on: +SlackTrigger) +

+

+

+ + + + + + + + + + + + + + + + + +
FieldDescription
+username
+ +string + +
+(Optional) +

Username is the Slack application’s username

+
+icon
+ +string + +
+(Optional) +

Icon is the Slack application’s icon, e.g. :robot_face: or https://example.com/image.png

+
+

SlackThread +

+

+(Appears on: +SlackTrigger) +

+

+

+ + + + + + + + + + + + + + + + + +
FieldDescription
+messageAggregationKey
+ +string + +
+(Optional) +

MessageAggregationKey allows aggregating the messages to a thread by some key.

+
+broadcastMessageToChannel
+ +bool + +
+(Optional) +

BroadcastMessageToChannel also allows broadcasting the message from the thread to the channel

+

SlackTrigger

@@ -2216,7 +2915,7 @@

SlackTrigger slackToken
- + Kubernetes core/v1.SecretKeySelector @@ -2234,7 +2933,7 @@

SlackTrigger (Optional) -

Channel refers to which Slack channel to send slack message.

+

Channel refers to which Slack channel to send the Slack message.

@@ -2249,6 +2948,58 @@

SlackTrigger

Message refers to the message to send to the Slack channel.

+ + +attachments
+ +string + + + +(Optional) +

Attachments is a JSON format string that represents an array of Slack attachments according to the attachments API: https://api.slack.com/reference/messaging/attachments .

+ + + + +blocks
+ +string + + + +(Optional) +

Blocks is a JSON format string that represents an array of Slack blocks according to the blocks API: https://api.slack.com/reference/block-kit/blocks .

+ + + + +thread
+ + +SlackThread + + + + +(Optional) +

Thread refers to additional options for sending messages to a Slack thread.

+ + + + +sender
+ + +SlackSender + + + + +(Optional) +

Sender refers to additional configuration of the Slack application that sends the message.

+ +

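A hedged sketch of a Slack trigger combining the thread and sender options documented above (secret, channel, and sender names are hypothetical):

slack:
  slackToken:
    name: slack-secret                  # hypothetical secret
    key: token
  channel: builds                       # hypothetical channel
  message: Build finished
  thread:
    messageAggregationKey: build-1234   # hypothetical aggregation key
    broadcastMessageToChannel: true
  sender:
    username: argo-events-bot           # hypothetical username
    icon: ":robot_face:"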
StandardK8STrigger @@ -2270,22 +3021,6 @@

StandardK8STrigger -GroupVersionResource
- - -Kubernetes meta/v1.GroupVersionResource - - - - -

-(Members of GroupVersionResource are embedded into this type.) -

-

The unambiguous kind of this object - used in order to retrieve the appropriate kubernetes api client for this resource

- - - - source
@@ -2294,7 +3029,7 @@

StandardK8STrigger -

Source of the K8 resource file(s)

+

Source of the K8s resource file(s)

@@ -2436,7 +3171,7 @@

Template container
-
+ Kubernetes core/v1.Container @@ -2450,7 +3185,7 @@

Template volumes
- + []Kubernetes core/v1.Volume @@ -2464,7 +3199,7 @@

Template securityContext
- + Kubernetes core/v1.PodSecurityContext @@ -2493,7 +3228,7 @@

Template tolerations
- + []Kubernetes core/v1.Toleration @@ -2507,7 +3242,7 @@

Template imagePullSecrets
- + []Kubernetes core/v1.LocalObjectReference @@ -2559,7 +3294,7 @@

Template affinity
- + Kubernetes core/v1.Affinity @@ -2690,6 +3425,34 @@

Trigger

Retry strategy, defaults to no retry

+ + +rateLimit
+ + +RateLimit + + + + +(Optional) +

Rate limit, default unit is Second

+ + + + +atLeastOnce
+ +bool + + + +(Optional) +

AtLeastOnce determines the trigger execution semantics. +Defaults to false. Trigger execution will use at-most-once semantics. +If set to true, Trigger execution will switch to at-least-once semantics.

+ +

TriggerParameter @@ -2699,11 +3462,14 @@

TriggerParameter AWSLambdaTrigger, ArgoWorkflowTrigger, AzureEventHubsTrigger, +AzureServiceBusTrigger, CustomTrigger, +EmailTrigger, HTTPTrigger, KafkaTrigger, NATSTrigger, OpenWhiskTrigger, +PulsarTrigger, SlackTrigger, StandardK8STrigger, Trigger) @@ -2870,6 +3636,21 @@

TriggerParameterSource If the DataKey is invalid and this is not defined, this param source will produce an error.

+ + +useRawData
+ +bool + + + +(Optional) +

UseRawData indicates if the value in an event at data key should be used without converting to string. +When true, a number, boolean, json or string parameter may be extracted. When the field is unspecified, or explicitly +false, the behavior is to turn the extracted field into a string. (e.g. when set to true, the parameter +123 will resolve to the numerical type, but when false, or not provided, the string “123” will be resolved)

+ +

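For illustration, a hypothetical trigger parameter using useRawData so a numeric event field is injected as a number rather than a string (dependency name and paths invented):

parameters:
  - src:
      dependencyName: webhook-dep   # hypothetical dependency
      dataKey: body.replicas
      useRawData: true              # injects 3 rather than "3"
    dest: spec.replicas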
TriggerPolicy @@ -2917,49 +3698,6 @@

TriggerPolicy -

TriggerSwitch -

-

-(Appears on: -TriggerTemplate) -

-

-

TriggerSwitch describes condition which must be satisfied in order to execute a trigger. -Depending upon condition type, status of dependency groups is used to evaluate the result. -Deprecated: will be removed in v1.5

-

- - - - - - - - - - - - - - - - - -
FieldDescription
-any
- -[]string - -
-

Any acts as a OR operator between dependencies

-
-all
- -[]string - -
-

All acts as a AND operator between dependencies

-

TriggerTemplate

@@ -3141,31 +3879,72 @@

TriggerTemplate -switch
+azureEventHubs
- -TriggerSwitch + +AzureEventHubsTrigger (Optional) -

DeprecatedSwitch is the condition to execute the trigger. -Deprecated: will be removed in v1.5, use conditions instead

+

AzureEventHubs refers to the trigger that sends an event to an Azure Event Hub.

-azureEventHubs
+pulsar
- -AzureEventHubsTrigger + +PulsarTrigger (Optional) -

AzureEventHubs refers to the trigger send an event to an Azure Event Hub.

+

Pulsar refers to the trigger designed to place messages on a Pulsar topic.

+ + + + +conditionsReset
+ + +[]ConditionsResetCriteria + + + + +(Optional) +

Criteria to reset the conditions

+ + + + +azureServiceBus
+ + +AzureServiceBusTrigger + + + + +(Optional) +

AzureServiceBus refers to the trigger designed to place messages on Azure Service Bus

+ + + + +email
+ + +EmailTrigger + + + + +(Optional) +

Email refers to the trigger designed to send an email notification

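As a sketch of the conditionsReset criteria added above, a hypothetical trigger template that clears its conditions on a schedule (the conditions expression, cron, and timezone are invented):

triggers:
  - template:
      name: my-trigger               # hypothetical
      conditions: "dep-a && dep-b"   # assumed conditions expression
      conditionsReset:
        - byTime:
            cron: "0 0 * * *"        # reset at midnight
            timezone: America/New_York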
diff --git a/api/sensor.md b/api/sensor.md index dec50d5a0e..7794974fa5 100644 --- a/api/sensor.md +++ b/api/sensor.md @@ -55,24 +55,26 @@ FunctionName refers to the name of the function to invoke. accessKey
- + Kubernetes core/v1.SecretKeySelector +(Optional)

-AccessKey refers K8 secret containing aws access key +AccessKey refers to the K8s secret containing the AWS access key

secretKey
- + Kubernetes core/v1.SecretKeySelector +(Optional)

-SecretKey refers K8 secret containing aws secret key +SecretKey refers to the K8s secret containing the AWS secret key

@@ -146,6 +148,17 @@ permission to invoke the function. + + +roleARN
string + + +(Optional) +

+RoleARN is the Amazon Resource Name (ARN) of the role to assume. +

+ +

@@ -194,7 +207,7 @@ Description

-Source of the K8 resource file(s) +Source of the K8s resource file(s)

@@ -227,18 +240,11 @@ object -GroupVersionResource
- -Kubernetes meta/v1.GroupVersionResource +args
\[\]string

-(Members of GroupVersionResource are embedded into this -type.) -

-

-The unambiguous kind of this object - used in order to retrieve the -appropriate kubernetes api client for this resource +Args is the list of arguments to pass to the argo CLI

@@ -315,7 +321,7 @@ URL to fetch the artifact from configmap
- + Kubernetes core/v1.ConfigMapKeySelector @@ -397,7 +403,7 @@ HubName refers to the Azure Event Hub to send events to sharedAccessKeyName
- + Kubernetes core/v1.SecretKeySelector @@ -409,7 +415,7 @@ SharedAccessKeyName refers to the name of the Shared Access Key sharedAccessKey
- + Kubernetes core/v1.SecretKeySelector @@ -448,6 +454,110 @@ are applied to the trigger resource. +

+AzureServiceBusTrigger +

+

+(Appears on: +TriggerTemplate) +

+

+

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Field + +Description +
+connectionString
+ +Kubernetes core/v1.SecretKeySelector +
+

+ConnectionString is the connection string for the Azure Service Bus +

+
+queueName
string +
+

+QueueName is the name of the Azure Service Bus Queue +

+
+topicName
string +
+

+TopicName is the name of the Azure Service Bus Topic +

+
+subscriptionName
string +
+

+SubscriptionName is the name of the Azure Service Bus Topic Subscription +

+
+tls
+github.com/argoproj/argo-events/pkg/apis/common.TLSConfig +
+(Optional) +

+TLS configuration for the service bus client +

+
+payload
+ \[\]TriggerParameter + +
+

+Payload is the list of key-value extracted from an event payload to +construct the request payload. +

+
+parameters
+ \[\]TriggerParameter + +
+(Optional) +

+Parameters is the list of key-value extracted from event’s payload that +are applied to the trigger resource. +

+

Comparator (string alias)

@@ -461,6 +571,84 @@ Comparator (string alias) Comparator refers to the comparator operator for a data filter

+

+ConditionsResetByTime +

+

+(Appears on: +ConditionsResetCriteria) +

+

+

+ + + + + + + + + + + + + + + + + +
+Field + +Description +
+cron
string +
+

+Cron is a cron-like expression. For reference, see: +https://en.wikipedia.org/wiki/Cron +

+
+timezone
string +
+(Optional) +
+

+ConditionsResetCriteria +

+

+(Appears on: +TriggerTemplate) +

+

+

+ + + + + + + + + + + + + +
+Field + +Description +
+byTime
+ +ConditionsResetByTime +
+

+Schedule is a cron-like expression. For reference, see: +https://en.wikipedia.org/wiki/Cron +

+

CustomTrigger

@@ -509,7 +697,7 @@ gRPC certSecret
- + Kubernetes core/v1.SecretKeySelector @@ -570,18 +758,6 @@ construct the request payload.

- - -certFilePath
string - - -

-DeprecatedCertFilePath is path to the cert file within sensor for secure -connection between sensor and custom trigger gRPC server. Deprecated: -will be removed in v1.5, use CertSecret instead -

- -

@@ -621,7 +797,7 @@ Path is the JSONPath of the event’s (JSON decoded) data key Path is a series of keys separated by a dot. A key may contain wildcard characters ‘\*’ and ‘?’. To access an array value use the index as the key. The dot and wildcard characters can be escaped with ‘’. See -https://github.com/tidwall/gjson\#path-syntax +https://github.com/tidwall/gjson#path-syntax for more information on how to use this.

@@ -657,8 +833,8 @@ strconv.ParseFloat() Strings are taken as is Nils this value is ignored

Comparator compares the event data with a user given value. Can be -“>=”, “>”, “=”, “!=”, “<”, or “<=”. Is optional, and if left -blank treated as equality “=”. +“\>=”, “\>”, “=”, “!=”, “\<”, or “\<=”. Is optional, and if left blank +treated as equality “=”.

@@ -680,16 +856,17 @@ and -

-DependencyGroup +

+EmailTrigger

(Appears on: -SensorSpec) +TriggerTemplate)

-DependencyGroup is the group of dependencies +EmailTrigger refers to the specification of the email notification +trigger.

@@ -706,21 +883,106 @@ Description + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -854,7 +1116,7 @@ Subject - The subject of the event in the context of the event producer + + + + + + + + + +
-name
string +parameters
+ \[\]TriggerParameter +
+(Optional) +

+Parameters is the list of key-value pairs extracted from the event’s payload +that are applied to the trigger resource. +

+
+username
string +
+(Optional) +

+Username refers to the username used to connect to the smtp server. +

+
+smtpPassword
+ +Kubernetes core/v1.SecretKeySelector +
+(Optional) +

+SMTPPassword refers to the Kubernetes secret that holds the smtp +password used to connect to the smtp server. +

+
+host
string +
+

+Host refers to the smtp host url to which the email is sent. +

+
+port
int32 +
+(Optional) +

+Port refers to the smtp server port to which the email is sent. Defaults to +0. +

+
+to
\[\]string +
+(Optional)

-Name of the group +To refers to the email addresses to which the emails are sent.

-dependencies
\[\]string +from
string +
+(Optional) +

+From refers to the address from which the email is sent. +

+subject
string +
+(Optional) +

+Subject refers to the subject line of the email being sent. +

+
+body
string +
+(Optional)

-Dependencies of events +Body refers to the body/content of the email being sent.

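Putting the email fields together, a minimal hypothetical EmailTrigger (host, addresses, and secret are invented):

email:
  host: smtp.example.com        # hypothetical smtp host
  port: 587
  username: sender@example.com
  smtpPassword:
    name: smtp-secret           # hypothetical secret
    key: password
  from: sender@example.com
  to:
    - oncall@example.com
  subject: Event received
  body: An event has been received.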
time
- + Kubernetes meta/v1.Time
@@ -932,10 +1194,146 @@ context and data of an event

+transform
+ +EventDependencyTransformer +
+

+Transform transforms the event data +

+
+filtersLogicalOperator
+ LogicalOperator + +
+

+FiltersLogicalOperator defines how different filters are evaluated +together. Available values: and (&&), or (\|\|) Is optional and if left +blank treated as and (&&). +

+
+

+EventDependencyFilter +

+

+(Appears on: +EventDependency) +

+

+

+EventDependencyFilter defines filters and constraints for an event. +

+

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Field + +Description +
+time
+TimeFilter +
+

+Time filter on the event with escalation +

+
+context
+ EventContext +
+

+Context filter constraints +

+
+data
+\[\]DataFilter +
+

+Data filter constraints with escalation +

+
+exprs
+\[\]ExprFilter +
+

+Exprs contains the list of expressions evaluated against the event +payload. +

+
+dataLogicalOperator
+ LogicalOperator + +
+

+DataLogicalOperator defines how multiple Data filters (if defined) are +evaluated together. Available values: and (&&), or (\|\|) Is optional +and if left blank treated as and (&&). +

+
+exprLogicalOperator
+ LogicalOperator + +
+

+ExprLogicalOperator defines how multiple Exprs filters (if defined) are +evaluated together. Available values: and (&&), or (\|\|) Is optional +and if left blank treated as and (&&). +

+
+script
string +
+

+Script refers to a Lua script evaluated to determine the validity of an +event. +

+
-

-EventDependencyFilter +

+EventDependencyTransformer

(Appears on: @@ -943,7 +1341,7 @@ EventDependencyFilter

-EventDependencyFilter defines filters and constraints for a event. +EventDependencyTransformer transforms the event

@@ -960,46 +1358,23 @@ Description - - - - - - - - @@ -1145,7 +1520,7 @@ Creds contain reference to git username and password @@ -1256,7 +1628,7 @@ Description @@ -1573,8 +1945,9 @@ Name of the topic. More info at partition
int32 @@ -1657,8 +2030,7 @@ construct the request payload. @@ -1686,6 +2058,19 @@ SASL configuration for the kafka client

+ + + +
-time
-TimeFilter -
-

-Time filter on the event with escalation -

-
-context
- EventContext -
-

-Context filter constraints -

-
-data
-\[\]DataFilter +jq
string
+(Optional)

-Data filter constraints with escalation +JQ holds the jq command applied for transformation

-exprs
-\[\]ExprFilter +script
string
+(Optional)

-Exprs contains the list of expressions evaluated against the event -payload. +Script refers to a Lua script used to transform the event

sshKeySecret
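A brief hypothetical sketch of the transform field on an event dependency (dependency and jq expression invented; jq and script are the two documented options):

dependencies:
  - name: webhook-dep          # hypothetical
    eventSourceName: webhook   # hypothetical
    eventName: example
    transform:
      jq: ".body.message"      # keep only the message field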
- + Kubernetes core/v1.SecretKeySelector
@@ -1215,15 +1590,12 @@ Refer
-sshKeyPath
string +insecureIgnoreHostKey
bool
(Optional)

-DeprecatedSSHKeyPath is path to your ssh key path. Use this if you don’t -want to provide username and password. ssh key path must be mounted in -sensor pod. Deprecated: will be removed in v1.5, use SSHKeySecret -instead. +Whether to ignore host key

username
- + Kubernetes core/v1.SecretKeySelector
@@ -1265,7 +1637,7 @@ Kubernetes core/v1.SecretKeySelector
password
- + Kubernetes core/v1.SecretKeySelector
@@ -1564,7 +1936,7 @@ URL of the Kafka broker, multiple URLs separated by comma.

Name of the topic. More info at -https://kafka.apache.org/documentation/\#intro\_topics +https://kafka.apache.org/documentation/#intro_topics

+(Optional)

-Partition to write data to. +DEPRECATED

-The partitioning key for the messages put on the Kafka topic. Defaults -to broker url. +The partitioning key for the messages put on the Kafka topic.

+schemaRegistry
+github.com/argoproj/argo-events/pkg/apis/common.SchemaRegistryConfig + +
+(Optional) +

+Schema Registry configuration to produce messages in Avro format +

+

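A hedged Kafka trigger sketch based on the fields above (broker, topic, and dependency are invented; partitioningKey is an assumed field name for the partitioning key described above):

kafka:
  url: kafka-broker:9092       # hypothetical broker; separate multiple URLs with commas
  topic: sensor-events         # hypothetical topic
  partitioningKey: my-key      # assumed field name
  payload:
    - src:
        dependencyName: webhook-dep   # hypothetical
        dataKey: body
      dest: message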
@@ -1737,6 +2122,17 @@ data for busy events. +

+LogicalOperator (string alias) +

+

+

+(Appears on: +EventDependency, +EventDependencyFilter) +

+

+

NATSTrigger

@@ -1871,7 +2267,7 @@ Namespace for the action. Defaults to “\_”. authToken
- + Kubernetes core/v1.SecretKeySelector @@ -1955,7 +2351,7 @@ Path is the JSONPath of the event’s (JSON decoded) data key Path is a series of keys separated by a dot. A key may contain wildcard characters ‘\*’ and ‘?’. To access an array value use the index as the key. The dot and wildcard characters can be escaped with ‘’. See -https://github.com/tidwall/gjson\#path-syntax +https://github.com/tidwall/gjson#path-syntax for more information on how to use this.

@@ -1972,6 +2368,232 @@ Name acts as key that holds the value at the path. +

+PulsarTrigger +

+

+(Appears on: +TriggerTemplate) +

+

+

+PulsarTrigger refers to the specification of the Pulsar trigger. +

+

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Field + +Description +
+url
string +
+

+Configure the service URL for the Pulsar service. +

+
+topic
string +
+

+Name of the topic. See +https://pulsar.apache.org/docs/en/concepts-messaging/ +

+
+parameters
+ \[\]TriggerParameter + +
+

+Parameters is the list of parameters that is applied to the resolved +Pulsar trigger object. +

+
+payload
+ \[\]TriggerParameter + +
+

+Payload is the list of key-value pairs extracted from an event payload to +construct the request payload. +

+
+tlsTrustCertsSecret
+ +Kubernetes core/v1.SecretKeySelector +
+(Optional) +

+Trusted TLS certificate secret. +

+
+tlsAllowInsecureConnection
bool +
+(Optional) +

+Whether the Pulsar client accepts untrusted TLS certificates from the +broker. +

+
+tlsValidateHostname
bool +
+(Optional) +

+Whether the Pulsar client verifies the validity of the host name from +the broker. +

+
+tls
+github.com/argoproj/argo-events/pkg/apis/common.TLSConfig +
+(Optional) +

+TLS configuration for the pulsar client. +

+
+authTokenSecret
+ +Kubernetes core/v1.SecretKeySelector +
+(Optional) +

+Authentication token for the Pulsar client. Either token or athenz can +be set to use auth. +

+
+connectionBackoff
+github.com/argoproj/argo-events/pkg/apis/common.Backoff +
+(Optional) +

+Backoff holds parameters applied to connection. +

+
+authAthenzParams
map\[string\]string +
+(Optional) +

+Authentication athenz parameters for the Pulsar client. Refer to +https://github.com/apache/pulsar-client-go/blob/master/pulsar/auth/athenz.go +Either token or athenz can be set to use auth. +

+
+authAthenzSecret
+ +Kubernetes core/v1.SecretKeySelector +
+(Optional) +

+Authentication athenz privateKey secret for the Pulsar client. +AuthAthenzSecret must be set if AuthAthenzParams is used. +

+
+

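A minimal hypothetical PulsarTrigger using the fields documented above (service URL, topic, and dependency invented):

pulsar:
  url: pulsar://pulsar-broker:6650    # hypothetical service URL
  topic: events                       # hypothetical topic
  tlsAllowInsecureConnection: false
  payload:
    - src:
        dependencyName: webhook-dep   # hypothetical
        dataKey: body
      dest: message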
+RateLimit +

+

+(Appears on: +Trigger) +

+

+

+ + + + + + + + + + + + + + + + + +
+Field + +Description +
+unit
+ RateLimiteUnit + +
+

+Defaults to Second +

+
+requestsPerUnit
int32 +
+
+

+RateLimiteUnit (string alias) +

+

+

+(Appears on: +RateLimit) +

+

+

Sensor

@@ -1995,7 +2617,7 @@ Description metadata
- + Kubernetes meta/v1.ObjectMeta @@ -2049,58 +2671,58 @@ Template is the pod specification for the sensor -dependencyGroups
- \[\]DependencyGroup - +errorOnFailedRound
bool

-DependencyGroups is a list of the groups of events. +ErrorOnFailedRound if set to true, marks sensor state as +error if the previous trigger round fails. Once sensor +state is set to error, no further triggers will be +processed.

-errorOnFailedRound
bool +eventBusName
string

-ErrorOnFailedRound if set to true, marks sensor state as -error if the previous trigger round fails. Once sensor -state is set to error, no further triggers will be -processed. +EventBusName references an EventBus name. By default the value is +“default”

-eventBusName
string +replicas
int32

-EventBusName references to a EventBus name. By default the value is -“default” +Replicas is the sensor deployment replicas

-circuit
string +revisionHistoryLimit
int32 +(Optional)

-Circuit is a boolean expression of dependency groups Deprecated: will be -removed in v1.5, use Switch in triggers instead. +RevisionHistoryLimit specifies how many old deployment revisions to +retain

-replicas
int32 +loggingFields
map\[string\]string +(Optional)

-Replicas is the sensor deployment replicas +LoggingFields adds additional key-value pairs when logging happens

@@ -2179,18 +2801,6 @@ Template is the pod specification for the sensor -dependencyGroups
- \[\]DependencyGroup - - - -

-DependencyGroups is a list of the groups of events. -

- - - - errorOnFailedRound
bool @@ -2215,22 +2825,34 @@ EventBusName references to a EventBus name. By default the value is -circuit
string +replicas
int32

-Circuit is a boolean expression of dependency groups Deprecated: will be -removed in v1.5, use Switch in triggers instead. +Replicas is the sensor deployment replicas

-replicas
int32 +revisionHistoryLimit
int32 +(Optional)

-Replicas is the sensor deployment replicas +RevisionHistoryLimit specifies how many old deployment revisions to +retain +

+ + + + +loggingFields
map\[string\]string + + +(Optional) +

+LoggingFields adds additional key-value pairs when logging happens

@@ -2272,6 +2894,99 @@ github.com/argoproj/argo-events/pkg/apis/common.Status
+

+SlackSender +

+

+(Appears on: +SlackTrigger) +

+

+

+ + + + + + + + + + + + + + + + + +
+Field + +Description +
+username
string +
+(Optional) +

+Username is the Slack application’s username +

+
+icon
string +
+(Optional) +

+Icon is the Slack application’s icon, e.g. :robot_face: or +https://example.com/image.png +

+
+

+SlackThread +

+

+(Appears on: +SlackTrigger) +

+

+

+ + + + + + + + + + + + + + + + + +
+Field + +Description +
+messageAggregationKey
string +
+(Optional) +

+MessageAggregationKey allows aggregating the messages to a thread by +some key. +

+
+broadcastMessageToChannel
bool +
+(Optional) +

+BroadcastMessageToChannel also allows broadcasting the message from the +thread to the channel +

+

SlackTrigger

@@ -2314,7 +3029,7 @@ are applied to the trigger resource. slackToken
- + Kubernetes core/v1.SecretKeySelector @@ -2331,7 +3046,7 @@ required to send messages. (Optional)

-Channel refers to which Slack channel to send slack message. +Channel refers to which Slack channel to send the Slack message.

@@ -2346,6 +3061,60 @@ Message refers to the message to send to the Slack channel.

+ + +attachments
string + + +(Optional) +

+Attachments is a JSON format string that represents an array of Slack +attachments according to the attachments API: +https://api.slack.com/reference/messaging/attachments +. +

+ + + + +blocks
string + + +(Optional) +

+Blocks is a JSON format string that represents an array of Slack blocks +according to the blocks API: +https://api.slack.com/reference/block-kit/blocks +. +

+ + + + +thread
+ SlackThread + + +(Optional) +

+Thread refers to additional options for sending messages to a Slack +thread. +

+ + + + +sender
+ SlackSender + + +(Optional) +

+Sender refers to additional configuration of the Slack application that +sends the message. +

+ +

@@ -2374,30 +3143,13 @@ Description -GroupVersionResource
- -Kubernetes meta/v1.GroupVersionResource - - -

-(Members of GroupVersionResource are embedded into this -type.) -

-

-The unambiguous kind of this object - used in order to retrieve the -appropriate kubernetes api client for this resource -

- - - - source
ArtifactLocation

-Source of the K8 resource file(s) +Source of the K8s resource file(s)

@@ -2547,7 +3299,7 @@ sensor pod. More info: container
- + Kubernetes core/v1.Container @@ -2560,7 +3312,7 @@ Container is the main container image to run in the sensor pod volumes
- + \[\]Kubernetes core/v1.Volume @@ -2574,7 +3326,7 @@ workflow. securityContext
- + Kubernetes core/v1.PodSecurityContext @@ -2603,7 +3355,7 @@ scheduled on that node. More info: tolerations
- + \[\]Kubernetes core/v1.Toleration @@ -2616,7 +3368,7 @@ If specified, the pod’s tolerations. imagePullSecrets
- + \[\]Kubernetes core/v1.LocalObjectReference @@ -2627,7 +3379,7 @@ same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: -https://kubernetes.io/docs/concepts/containers/images\#specifying-imagepullsecrets-on-a-pod +https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod

@@ -2667,7 +3419,7 @@ value, the higher the priority. More info: affinity
- + Kubernetes core/v1.Affinity @@ -2805,6 +3557,31 @@ Retry strategy, defaults to no retry

+ + +rateLimit
+ RateLimit + + +(Optional) +

+Rate limit, default unit is Second +

+ + + + +atLeastOnce
bool + + +(Optional) +

+AtLeastOnce determines the trigger execution semantics. Defaults to +false. Trigger execution will use at-most-once semantics. If set to +true, Trigger execution will switch to at-least-once semantics. +

+ +

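To illustrate the two trigger-level knobs added above, a hypothetical trigger that opts into at-least-once semantics and a rate limit (name invented):

triggers:
  - template:
      name: reliable-trigger    # hypothetical
      # ... trigger definition ...
    atLeastOnce: true           # switch from at-most-once to at-least-once execution
    rateLimit:
      requestsPerUnit: 10       # unit defaults to Second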
@@ -2815,11 +3592,14 @@ TriggerParameter AWSLambdaTrigger, ArgoWorkflowTrigger, AzureEventHubsTrigger, +AzureServiceBusTrigger, CustomTrigger, +EmailTrigger, HTTPTrigger, KafkaTrigger, NATSTrigger, OpenWhiskTrigger, +PulsarTrigger, SlackTrigger, StandardK8STrigger, Trigger) @@ -2863,7 +3643,7 @@ dependency Dest is the JSONPath of a resource key. A path is a series of keys separated by a dot. The colon character can be escaped with ‘.’ The -1 key can be used to append a value to an existing array. See -https://github.com/tidwall/sjson\#path-syntax +https://github.com/tidwall/sjson#path-syntax for more information about how this is used.

@@ -2946,7 +3726,7 @@ ContextKey is a series of keys separated by a dot. A key may contain wildcard characters ‘\*’ and ‘?’. To access an array value use the index as the key. The dot and wildcard characters can be escaped with ‘’. See -https://github.com/tidwall/gjson\#path-syntax +https://github.com/tidwall/gjson#path-syntax for more information on how to use this.

@@ -2978,7 +3758,7 @@ DataKey is the JSONPath of the event’s (JSON decoded) data key DataKey is a series of keys separated by a dot. A key may contain wildcard characters ‘\*’ and ‘?’. To access an array value use the index as the key. The dot and wildcard characters can be escaped with ‘’. See -https://github.com/tidwall/gjson\#path-syntax +https://github.com/tidwall/gjson#path-syntax for more information on how to use this.

@@ -3011,6 +3791,23 @@ this is not defined, this param source will produce an error.

+ + +useRawData
bool + + +(Optional) +

+UseRawData indicates if the value in an event at data key should be used +without converting to string. When true, a number, boolean, json or +string parameter may be extracted. When the field is unspecified, or +explicitly false, the behavior is to turn the extracted field into a +string. (e.g. when set to true, the parameter 123 will resolve to the +numerical type, but when false, or not provided, the string “123” will +be resolved) +

+ +

@@ -3064,55 +3861,6 @@ response status -

-TriggerSwitch -

-

-(Appears on: -TriggerTemplate) -

-

-

-TriggerSwitch describes condition which must be satisfied in order to -execute a trigger. Depending upon condition type, status of dependency -groups is used to evaluate the result. Deprecated: will be removed in -v1.5 -

-

- - - - - - - - - - - - - - - - - -
-Field - -Description -
-any
\[\]string -
-

-Any acts as a OR operator between dependencies -

-
-all
\[\]string -
-

-All acts as a AND operator between dependencies -

-

TriggerTemplate

@@ -3288,28 +4036,66 @@ Log refers to the trigger designed to invoke log the event. -switch
- TriggerSwitch +azureEventHubs
+ +AzureEventHubsTrigger + + +(Optional) +

+AzureEventHubs refers to the trigger send an event to an Azure Event +Hub. +

+ + + + +pulsar
+ PulsarTrigger (Optional)

-DeprecatedSwitch is the condition to execute the trigger. Deprecated: -will be removed in v1.5, use conditions instead +Pulsar refers to the trigger designed to place messages on Pulsar topic.

-azureEventHubs
- -AzureEventHubsTrigger +conditionsReset
+ +\[\]ConditionsResetCriteria (Optional)

-AzureEventHubs refers to the trigger send an event to an Azure Event -Hub. +Criteria to reset the conditons +

+ + + + +azureServiceBus
+ +AzureServiceBusTrigger + + +(Optional) +

+AzureServiceBus refers to the trigger designed to place messages on +Azure Service Bus +

+ + + + +email
+ EmailTrigger + + +(Optional) +

+Email refers to the trigger designed to send an email notification

diff --git a/cmd/commands/controller.go b/cmd/commands/controller.go new file mode 100644 index 0000000000..26bea82f11 --- /dev/null +++ b/cmd/commands/controller.go @@ -0,0 +1,44 @@ +package commands + +import ( + "github.com/spf13/cobra" + + "github.com/argoproj/argo-events/common" + "github.com/argoproj/argo-events/common/logging" + controllercmd "github.com/argoproj/argo-events/controllers/cmd" + envpkg "github.com/argoproj/pkg/env" +) + +func NewControllerCommand() *cobra.Command { + var ( + leaderElection bool + namespaced bool + managedNamespace string + metricsPort int32 + healthPort int32 + klogLevel int + ) + + command := &cobra.Command{ + Use: "controller", + Short: "Start the controller", + Run: func(cmd *cobra.Command, args []string) { + logging.SetKlogLevel(klogLevel) + eventOpts := controllercmd.ArgoEventsControllerOpts{ + LeaderElection: leaderElection, + ManagedNamespace: managedNamespace, + Namespaced: namespaced, + MetricsPort: metricsPort, + HealthPort: healthPort, + } + controllercmd.Start(eventOpts) + }, + } + command.Flags().BoolVar(&namespaced, "namespaced", false, "Whether to run in namespaced scope, defaults to false.") + command.Flags().StringVar(&managedNamespace, "managed-namespace", envpkg.LookupEnvStringOr("NAMESPACE", "argo-events"), "The namespace that the controller watches when \"--namespaced\" is \"true\".") + command.Flags().BoolVar(&leaderElection, "leader-election", true, "Enable leader election") + command.Flags().Int32Var(&metricsPort, "metrics-port", common.ControllerMetricsPort, "Metrics port") + command.Flags().Int32Var(&healthPort, "health-port", common.ControllerHealthPort, "Health port") + command.Flags().IntVar(&klogLevel, "kloglevel", 0, "klog level") + return command +} diff --git a/cmd/commands/eventbuscontroller.go b/cmd/commands/eventbuscontroller.go deleted file mode 100644 index 19d72a6ef5..0000000000 --- a/cmd/commands/eventbuscontroller.go +++ /dev/null @@ -1,27 +0,0 @@ -package commands - -import ( - "github.com/spf13/cobra" - - eventbuscmd "github.com/argoproj/argo-events/controllers/eventbus/cmd" - envpkg "github.com/argoproj/pkg/env" -) - -func NewEventBusControllerCommand() *cobra.Command { - var ( - namespaced bool - managedNamespace string - ) - - command := &cobra.Command{ - Use: "eventbus-controller", - Short: "Start an EventBus controller", - Run: func(cmd *cobra.Command, args []string) { - eventbuscmd.Start(namespaced, managedNamespace) - - }, - } - command.Flags().BoolVar(&namespaced, "namespaced", false, "Whether to run in namespaced scope, defaults to false.") - command.Flags().StringVar(&managedNamespace, "managed-namespace", envpkg.LookupEnvStringOr("NAMESPACE", "argo-events"), "The namespace that the controller watches when \"--namespaced\" is \"true\".") - return command -} diff --git a/cmd/commands/eventsourcecontroller.go b/cmd/commands/eventsourcecontroller.go deleted file mode 100644 index af128684c1..0000000000 --- a/cmd/commands/eventsourcecontroller.go +++ /dev/null @@ -1,26 +0,0 @@ -package commands - -import ( - "github.com/spf13/cobra" - - eventsourcecmd "github.com/argoproj/argo-events/controllers/eventsource/cmd" - envpkg "github.com/argoproj/pkg/env" -) - -func NewEventSourceControllerCommand() *cobra.Command { - var ( - namespaced bool - managedNamespace string - ) - - command := &cobra.Command{ - Use: "eventsource-controller", - Short: "Start an EventSource controller", - Run: func(cmd *cobra.Command, args []string) { - eventsourcecmd.Start(namespaced, managedNamespace) - }, - } - 
command.Flags().BoolVar(&namespaced, "namespaced", false, "Whether to run in namespaced scope, defaults to false.") - command.Flags().StringVar(&managedNamespace, "managed-namespace", envpkg.LookupEnvStringOr("NAMESPACE", "argo-events"), "The namespace that the controller watches when \"--namespaced\" is \"true\".") - return command -} diff --git a/cmd/commands/root.go b/cmd/commands/root.go index 109dbfa7a8..a2d41b1aab 100644 --- a/cmd/commands/root.go +++ b/cmd/commands/root.go @@ -23,9 +23,7 @@ func Execute() { } func init() { - rootCmd.AddCommand(NewEventBusControllerCommand()) - rootCmd.AddCommand(NewEventSourceControllerCommand()) - rootCmd.AddCommand(NewSensorControllerCommand()) + rootCmd.AddCommand(NewControllerCommand()) rootCmd.AddCommand(NewEventSourceCommand()) rootCmd.AddCommand(NewSensorCommand()) rootCmd.AddCommand(NewWebhookCommand()) diff --git a/cmd/commands/sensorcontroller.go b/cmd/commands/sensorcontroller.go deleted file mode 100644 index 06aaefc8f2..0000000000 --- a/cmd/commands/sensorcontroller.go +++ /dev/null @@ -1,26 +0,0 @@ -package commands - -import ( - "github.com/spf13/cobra" - - sensorcmd "github.com/argoproj/argo-events/controllers/sensor/cmd" - envpkg "github.com/argoproj/pkg/env" -) - -func NewSensorControllerCommand() *cobra.Command { - var ( - namespaced bool - managedNamespace string - ) - - command := &cobra.Command{ - Use: "sensor-controller", - Short: "Start a Sensor controller", - Run: func(cmd *cobra.Command, args []string) { - sensorcmd.Start(namespaced, managedNamespace) - }, - } - command.Flags().BoolVar(&namespaced, "namespaced", false, "Whether to run in namespaced scope, defaults to false.") - command.Flags().StringVar(&managedNamespace, "managed-namespace", envpkg.LookupEnvStringOr("NAMESPACE", "argo-events"), "The namespace that the controller watches when \"--namespaced\" is \"true\".") - return command -} diff --git a/codefresh/codefresh.go b/codefresh/codefresh.go index 28376fc8cf..522b22b34a 100644 --- a/codefresh/codefresh.go +++ b/codefresh/codefresh.go @@ -3,31 +3,192 @@ package codefresh import ( "bytes" "context" + "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" + "os" + "strconv" "time" + cloudevents "github.com/cloudevents/sdk-go/v2" "github.com/pkg/errors" + "go.uber.org/zap" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "github.com/argoproj/argo-events/common" + "github.com/argoproj/argo-events/common/logging" + apicommon "github.com/argoproj/argo-events/pkg/apis/common" ) const ( + EnvVarShouldReportToCF = "SHOULD_REPORT_TO_CF" + cfConfigMapName = "codefresh-cm" cfBaseURLConfigMapKey = "base-url" cfSecretName = "codefresh-token" cfAuthSecretKey = "token" ) -type Config struct { - BaseURL string - AuthToken string +var withRetry = common.DoWithRetry // alias + +var eventTypesToReportWhitelist = map[apicommon.EventSourceType]bool{ + apicommon.GithubEvent: true, + apicommon.GitlabEvent: true, + apicommon.BitbucketEvent: true, + apicommon.BitbucketServerEvent: true, + apicommon.CalendarEvent: true, +} + +type config struct { + baseURL string + authToken string +} + +type Client struct { + ctx context.Context + logger *zap.SugaredLogger + cfConfig *config + httpClient *http.Client + dryRun bool +} + +type ErrorContext struct { + metav1.ObjectMeta + metav1.TypeMeta } -func GetCodefreshConfig(ctx context.Context, namespace string) (*Config, error) { +type object struct { + Group string `json:"group"` + Version string `json:"version"` + Kind string `json:"kind"` + 
Name string `json:"name"` + Namespace string `json:"namespace"` + Labels map[string]string `json:"labels"` +} + +type errorContext struct { + Object object `json:"object"` +} + +type errorPayload struct { + ErrMsg string `json:"errMsg"` + Context errorContext `json:"context"` +} + +func NewClient(ctx context.Context, namespace string) (*Client, error) { + logger := logging.FromContext(ctx) + + dryRun := !shouldEnableReporting() + if dryRun { + return &Client{ + logger: logger, + dryRun: true, + }, nil + } + + config, err := getCodefreshConfig(ctx, namespace) + if err != nil { + return nil, err + } + + return &Client{ + ctx: ctx, + logger: logger, + cfConfig: config, + httpClient: &http.Client{ + Timeout: 30 * time.Second, + }, + }, nil +} + +func (c *Client) ReportEvent(event cloudevents.Event) { + if !shouldReportEvent(event) { + return + } + + if c.dryRun { + c.logger.Infow("succeeded to report an event to Codefresh", zap.String(logging.LabelEventName, event.Subject()), + zap.String(logging.LabelEventSourceType, event.Type()), zap.String("eventID", event.ID()), zap.String("dryRun", "true")) + return + } + + eventJson, err := json.Marshal(event) + if err != nil { + c.logger.Errorw("failed to report an event to Codefresh", zap.Error(err), zap.String(logging.LabelEventName, event.Subject()), + zap.String(logging.LabelEventSourceType, event.Type()), zap.String("eventID", event.ID())) + return + } + + url := c.cfConfig.baseURL + "/2.0/api/events/event-payload" + err = c.sendJSON(eventJson, url) + if err != nil { + c.logger.Errorw("failed to report an event to Codefresh", zap.Error(err), zap.String(logging.LabelEventName, event.Subject()), + zap.String(logging.LabelEventSourceType, event.Type()), zap.String("eventID", event.ID())) + } else { + c.logger.Infow("succeeded to report an event to Codefresh", zap.String(logging.LabelEventName, event.Subject()), + zap.String(logging.LabelEventSourceType, event.Type()), zap.String("eventID", event.ID())) + } +} + +func (c *Client) ReportError(originalErr error, errContext ErrorContext) { + originalErrMsg := originalErr.Error() + + if c.dryRun { + c.logger.Infow("succeeded to report an error to Codefresh", + zap.String("originalError", originalErrMsg), zap.String("dryRun", "true")) + return + } + + errPayloadJson, err := json.Marshal(constructErrorPayload(originalErrMsg, errContext)) + if err != nil { + c.logger.Errorw("failed to report an error to Codefresh", zap.Error(err), zap.String("originalError", originalErrMsg)) + return + } + + url := c.cfConfig.baseURL + "/2.0/api/events/error" + err = c.sendJSON(errPayloadJson, url) + if err != nil { + c.logger.Errorw("failed to report an error to Codefresh", zap.Error(err), zap.String("originalError", originalErrMsg)) + } else { + c.logger.Infow("succeeded to report an error to Codefresh", zap.String("originalError", originalErrMsg)) + } +} + +func (c *Client) sendJSON(jsonBody []byte, url string) error { + return withRetry(&common.DefaultBackoff, func() error { + req, err := http.NewRequestWithContext(c.ctx, "POST", url, bytes.NewBuffer(jsonBody)) + if err != nil { + return err + } + + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", c.cfConfig.authToken) + + res, err := c.httpClient.Do(req) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("failed reporting to Codefresh, event: %s", string(jsonBody))) + } + defer res.Body.Close() + + isStatusOK := res.StatusCode >= 200 && res.StatusCode < 300 + if !isStatusOK { + b, _ := io.ReadAll(res.Body) + return 
errors.Errorf("failed reporting to Codefresh, got response: status code %d and body %s, original request body: %s", + res.StatusCode, string(b), string(jsonBody)) + } + + return nil + }) +} + +func shouldReportEvent(event cloudevents.Event) bool { + return eventTypesToReportWhitelist[apicommon.EventSourceType(event.Type())] +} + +func getCodefreshConfig(ctx context.Context, namespace string) (*config, error) { kubeClient, err := common.CreateKubeClient() if err != nil { return nil, err @@ -41,9 +202,9 @@ func GetCodefreshConfig(ctx context.Context, namespace string) (*Config, error) return nil, err } - return &Config{ - BaseURL: baseURL, - AuthToken: token, + return &config{ + baseURL: baseURL, + authToken: token, }, nil } @@ -54,6 +215,7 @@ func getCodefreshAuthToken(ctx context.Context, kubeClient kubernetes.Interface, Name: cfSecretName, }, } + return common.GetSecretValue(ctx, kubeClient, namespace, cfSecretSelector) } @@ -64,31 +226,35 @@ func getCodefreshBaseURL(ctx context.Context, kubeClient kubernetes.Interface, n Name: cfConfigMapName, }, } + return common.GetConfigMapValue(ctx, kubeClient, namespace, cfConfigMapSelector) } -func ReportEventToCodefresh(eventJson []byte, config *Config) error { - contentType := "application/json" - url := config.BaseURL + "/2.0/api/events/event-payload" - req, _ := http.NewRequest("POST", url, bytes.NewBuffer(eventJson)) - req.Header.Set("Content-Type", contentType) - req.Header.Set("Authorization", config.AuthToken) +func constructErrorPayload(errMsg string, errContext ErrorContext) errorPayload { + gvk := errContext.GroupVersionKind() - client := &http.Client{ - Timeout: 30 * time.Second, - } - res, err := client.Do(req) - if err != nil { - return errors.Wrap(err, fmt.Sprintf("failed reporting to Codefresh, event: %s", string(eventJson))) + return errorPayload{ + ErrMsg: errMsg, + Context: errorContext{ + Object: object{ + Name: errContext.Name, + Namespace: errContext.Namespace, + Group: gvk.Group, + Version: gvk.Version, + Kind: gvk.Kind, + Labels: errContext.Labels, + }, + }, } - defer res.Body.Close() +} - isStatusOK := res.StatusCode >= 200 && res.StatusCode < 300 - if !isStatusOK { - b, _ := ioutil.ReadAll(res.Body) - return errors.Errorf("failed reporting to Codefresh, got response: status code %d and body %s, event: %s", - res.StatusCode, string(b), string(eventJson)) +func shouldEnableReporting() bool { + shouldReport := true // default + if value, ok := os.LookupEnv(EnvVarShouldReportToCF); ok { + parsed, err := strconv.ParseBool(value) + if err == nil { + shouldReport = parsed + } } - - return nil + return shouldReport } diff --git a/common/boolminifier.go b/common/boolminifier.go deleted file mode 100644 index ed81c5f3c1..0000000000 --- a/common/boolminifier.go +++ /dev/null @@ -1,336 +0,0 @@ -package common - -import ( - "fmt" - "math" - "sort" - "strconv" - "strings" - - "github.com/Knetic/govaluate" - "github.com/pkg/errors" -) - -const ( - variableFlag = "VARIABLE-" -) - -// Minifier is a bool expression minifier -type Minifier interface { - GetExpression() string -} - -type term []int - -type expr struct { - expression *govaluate.EvaluableExpression - variables []string - minterms []term - table []tableRow -} - -type tableRow struct { - term term - postfix bool -} - -// NewBoolExpression returns a Minifier instance -// It is used to simplify boolean epressions. -// For example, "(a || b || c) && (a && b)" can be simplified as "a && b" -// It is achieved by using Quine–McCluskey algorithm. 
-// See https://en.wikipedia.org/wiki/Quine%E2%80%93McCluskey_algorithm -func NewBoolExpression(expression string) (Minifier, error) { - expression = strings.ReplaceAll(expression, "-", "\\-") - expression = strings.ReplaceAll(expression, ":", "\\:") - ex, err := govaluate.NewEvaluableExpression(expression) - if err != nil { - return nil, err - } - // Validate - for _, token := range ex.Tokens() { - switch token.Kind { - case govaluate.VARIABLE: - case govaluate.LOGICALOP: - case govaluate.CLAUSE: - case govaluate.CLAUSE_CLOSE: - continue - default: - return nil, errors.New("unsupported symbol found") - } - } - - vars := []string{} - keys := make(map[string]bool) - for _, v := range ex.Vars() { - if _, ok := keys[v]; !ok { - keys[v] = true - vars = append(vars, v) - } - } - sort.Strings(vars) - return &expr{ - expression: ex, - variables: vars, - }, nil -} - -func (e *expr) GetExpression() string { - e.generateTable() - if len(e.variables) == 0 || len(e.table) == 0 { - return "" - } - for _, tr := range e.table { - if tr.postfix { - e.minterms = append(e.minterms, tr.term) - } - } - - saver := []term{} - for i := 0; i < len(e.variables) && len(e.minterms) > 1; i++ { - compared := make([]bool, len(e.minterms)) - for j := 0; j < len(e.minterms)-1; j++ { - for k := j + 1; k < len(e.minterms); k++ { - temp := term{} - for l := 0; l < len(e.variables); l++ { - if e.minterms[j][l] == e.minterms[k][l] { - temp = append(temp, l) - } - } - if len(temp) == len(e.variables)-1 { - saver, compared = e.saveValue(temp, saver, compared, j, k) - } - } - } - saver = e.addOther(saver, compared) - - if len(saver) > 0 { - e.minterms = []term{} - e.minterms = append(e.minterms, saver...) - } - - // remove duplicates - for i := 0; i < len(e.minterms); i++ { - for j := i + 1; j < len(e.minterms); j++ { - if termEqual(e.minterms[i], e.minterms[j]) { - // delete j - e.minterms = append(e.minterms[:j], e.minterms[j+1:]...) 
- j-- - } - } - } - saver = []term{} - } - return e.getMinified() -} - -func (e *expr) saveValue(temp term, saver []term, compared []bool, start, end int) ([]term, []bool) { - if len(temp) == len(e.variables)-1 { - for i := 0; i < len(e.minterms[start]); i++ { - if i == len(temp) { - temp = append(temp, -1) - } else if i != temp[i] { - // insert temp[i] = -1 - temp = append(temp, 0) - copy(temp[i+1:], temp[i:]) - temp[i] = -1 - } - } - - t := term{} - for i := 0; i < len(temp); i++ { - if temp[i] == -1 { - t = append(t, -1) - } else { - t = append(t, e.minterms[start][i]) - } - } - saver = append(saver, t) - - compared[start] = true - compared[end] = true - } - return saver, compared -} - -func (e *expr) addOther(saver []term, compared []bool) []term { - for i := 0; i < len(e.minterms); i++ { - if len(compared) > 0 && !compared[i] { - t := term{} - for j := 0; j < len(e.minterms[i]); j++ { - t = append(t, e.minterms[i][j]) - } - saver = append(saver, t) - } - } - return saver -} - -func (e *expr) getMinified() string { - orVars := []string{} - for _, t := range e.minterms { - andVars := []string{} - for i := 0; i < len(t); i++ { - if t[i] == -1 { - continue - } - andVars = append(andVars, e.variables[i]) - } - if len(andVars) > 1 { - orVars = append(orVars, fmt.Sprintf("(%s)", strings.Join(andVars, " && "))) - } else if len(andVars) == 1 { - orVars = append(orVars, andVars[0]) - } - } - switch { - case len(orVars) == 1: - if strings.HasPrefix(orVars[0], "(") && strings.HasSuffix(orVars[0], ")") { - return orVars[0][1 : len(orVars[0])-1] - } - return orVars[0] - case len(orVars) > 1: - return strings.Join(orVars, " || ") - default: - return "" - } -} - -func (e *expr) generateTable() { - valueTable := getTable(len(e.variables)) - postfix := e.infixToPostfix() - for _, t := range valueTable { - e.table = append(e.table, tableRow{ - term: t, - postfix: e.evaluatePostfix(e.variables, t, postfix), - }) - } -} - -func (e *expr) infixToPostfix() []string { - postfix := []string{} - operators := stringStack{} - for _, token := range e.expression.Tokens() { - switch token.Kind { - case govaluate.CLAUSE: - operators.push("(") - case govaluate.CLAUSE_CLOSE: - for operators.peek() != "(" { - postfix = append(postfix, operators.pop()) - } - // pop up "(" - operators.pop() - case govaluate.LOGICALOP: - for !operators.isEmpty() && rank(token.Value) <= rank(operators.peek()) { - postfix = append(postfix, operators.pop()) - } - operators.push(fmt.Sprintf("%v", token.Value)) - default: - // VARIABLE - postfix = append(postfix, fmt.Sprintf("%s%v", variableFlag, token.Value)) - } - } - - for !operators.isEmpty() { - postfix = append(postfix, operators.pop()) - } - return postfix -} - -func (e *expr) evaluatePostfix(vars []string, set term, postfix []string) bool { - varMap := make(map[string]int) - for index, v := range e.variables { - varMap[v] = index - } - - operands := []int{} - for _, p := range postfix { - if strings.HasPrefix(p, variableFlag) { - v := p[len(variableFlag):] - index := varMap[v] - operands = append(operands, set[index]) - continue - } - switch { - case p == "||": - n := len(operands) - 1 - operands[n-1] = operands[n] + operands[n-1] - operands[n]*operands[n-1] - operands = operands[:n] - case p == "&&": - n := len(operands) - 1 - operands[n-1] = operands[n] * operands[n-1] - operands = operands[:n] - } - } - return operands[len(operands)-1] > 0 -} - -type stringStack struct { - strings []string -} - -func (ss *stringStack) push(str string) { - ss.strings = append(ss.strings, str) -} - 
-func (ss *stringStack) pop() string { - n := len(ss.strings) - 1 - result := ss.strings[n] - ss.strings = ss.strings[:n] - return result -} - -func (ss *stringStack) peek() string { - n := len(ss.strings) - 1 - return ss.strings[n] -} - -func (ss *stringStack) isEmpty() bool { - return len(ss.strings) == 0 -} - -func getTable(varSize int) []term { - max := int(math.Pow(float64(2), float64(varSize))) - result := []term{} - for i := 0; i < max; i++ { - result = append(result, getRow(i, varSize)) - } - return result -} - -func getRow(num, varSize int) term { - binaryStr := strconv.FormatInt(int64(num), 2) - for len(binaryStr) < varSize { - binaryStr = "0" + binaryStr - } - t := []int{} - for i := 0; i < len(binaryStr); i++ { - if binaryStr[i] == '0' { - t = append(t, 0) - } else { - t = append(t, 1) - } - } - return term(t) -} - -func rank(operator interface{}) int { - switch { - case operator == "||": - return 1 - case operator == "&&": - return 2 - default: - return 0 - } -} - -func termEqual(a, b term) bool { - if len(a) != len(b) { - return false - } - for i, v := range a { - if v != b[i] { - return false - } - } - return true -} diff --git a/common/boolminifier_test.go b/common/boolminifier_test.go deleted file mode 100644 index 3f895c7ac3..0000000000 --- a/common/boolminifier_test.go +++ /dev/null @@ -1,57 +0,0 @@ -package common - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestSimplifyBoolExpression(t *testing.T) { - tests := []struct { - expression string - expect string - }{ - { - expression: "a || b", - expect: "b || a", - }, - { - expression: "a && b", - expect: "a && b", - }, - { - expression: "a_a || b_b", - expect: "b_b || a_a", - }, - { - expression: "a-a && b-b", - expect: "a-a && b-b", - }, - { - expression: "(a || b || c || d || e) && (c && a)", - expect: "a && c", - }, - { - expression: "(a || b) && c", - expect: "(b && c) || (a && c)", - }, - { - expression: "((a && b) || (c && d)) && c", - expect: "(c && d) || (a && b && c)", - }, - { - expression: "((a && b) || (c && d)) || c", - expect: "c || (a && b)", - }, - { - expression: "a:a && b:b", - expect: "a:a && b:b", - }, - } - - for _, test := range tests { - expr, err := NewBoolExpression(test.expression) - assert.NoError(t, err) - assert.Equal(t, test.expect, expr.GetExpression()) - } -} diff --git a/common/common.go b/common/common.go index a0c9d50096..c45b4fb004 100644 --- a/common/common.go +++ b/common/common.go @@ -17,9 +17,9 @@ limitations under the License. 
package common import ( + "fmt" "reflect" - "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" ) @@ -29,6 +29,12 @@ const ( EnvVarKubeConfig = "KUBECONFIG" // EnvVarDebugLog is the env var to turn on the debug mode for logging EnvVarDebugLog = "DEBUG_LOG" + // EnvVarPodName should be set to the name of the pod + EnvVarPodName = "POD_NAME" + // EnvVarLeaderElection sets the leader election mode + EnvVarLeaderElection = "LEADER_ELECTION" + // EnvImagePullPolicy is the env var to set container's ImagePullPolicy + EnvImagePullPolicy = "IMAGE_PULL_POLICY" ) // EventBus related @@ -40,9 +46,51 @@ const ( // volumeMount path for eventbus auth file EventBusAuthFileMountPath = "/etc/eventbus/auth" // Default NATS Streaming messages max age - NATSStreamingMaxAge = "72h" + STANMaxAge = "72h" + // Default NATS Streaming max messages per channel + STANMaxMsgs = uint64(1000000) + // Default NATS Streaming max subscriptions per channel + STANMaxSubs = uint64(1000) + // Default NATS Streaming max total size of messages per channel + STANMaxBytes = "1GB" + // Default NATS Streaming max size of message payload + STANMaxPayload = "1MB" + // Default NATS Streaming RAFT heartbeat timeout + STANRaftHeartbeatTimeout = "2s" + // Default NATS Streaming RAFT election timeout + STANRaftElectionTimeout = "2s" + // Default NATS Streaming RAFT lease timeout + STANRaftLeaseTimeout = "1s" + // Default NATS Streaming RAFT commit timeout + STANRaftCommitTimeout = "100ms" + + // Default EventBus name DefaultEventBusName = "default" + + // key of auth server secret + JetStreamServerSecretAuthKey = "auth" + // key of encryption server secret + JetStreamServerSecretEncryptionKey = "encryption" + // key of client auth secret + JetStreamClientAuthSecretKey = "client-auth" + // key for server private key + JetStreamServerPrivateKeyKey = "private-key" + // key for server TLS certificate + JetStreamServerCertKey = "cert" + // key for server CA certificate + JetStreamServerCACertKey = "ca-cert" + // key for cluster private key + JetStreamClusterPrivateKeyKey = "cluster-private-key" + // key for cluster TLS certificate + JetStreamClusterCertKey = "cluster-cert" + // key for cluster CA certificate + JetStreamClusterCACertKey = "cluster-ca-cert" + // key of nats-js.conf in the configmap + JetStreamConfigMapKey = "nats-js" + // JetStream stream name + JetStreamStreamName = "default" + // Default JetStream max size of message payload + JetStreamMaxPayload = "1MB" ) // Sensor constants @@ -66,7 +114,7 @@ const ( ) var ( - ErrNilEventSource = errors.New("event source can't be nil") + ErrNilEventSource = fmt.Errorf("event source can't be nil") ) // Miscellaneous Labels @@ -75,6 +123,8 @@ const ( LabelOwnerName = "owner-name" // AnnotationResourceSpecHash is the annotation of a K8s resource spec hash AnnotationResourceSpecHash = "resource-spec-hash" + // AnnotationLeaderElection is the annotation for leader election + AnnotationLeaderElection = "events.argoproj.io/leader-election" ) // various supported media types @@ -89,9 +139,12 @@ const ( SensorMetricsPort = 7777 ControllerMetricsPort = 7777 EventBusMetricsPort = 7777 + ControllerHealthPort = 8081 ) var ( SecretKeySelectorType = reflect.TypeOf(&corev1.SecretKeySelector{}) ConfigMapKeySelectorType = reflect.TypeOf(&corev1.ConfigMapKeySelector{}) ) + +type Object = map[string]interface{} diff --git a/common/cronutil.go b/common/cronutil.go new file mode 100644 index 0000000000..a46d8b032f --- /dev/null +++ b/common/cronutil.go @@ -0,0 +1,134 @@ +package common + +import ( + "fmt" + "time" + + 
cronlib "github.com/robfig/cron/v3" +) + +const ( + // Set the top bit if a star was included in the expression. + starBit = 1 << 63 +) + +// For a given cron specification, return the previous activation time +// If no time can be found to satisfy the schedule, return the zero time. +func PrevCronTime(cronSpec string, parser cronlib.Parser, t time.Time) (time.Time, error) { + var tm time.Time + sched, err := parser.Parse(cronSpec) + if err != nil { + return tm, fmt.Errorf("can't derive previous Cron time for cron spec %s; couldn't parse; err=%v", cronSpec, err) + } + s, castOk := sched.(*cronlib.SpecSchedule) + if !castOk { + return tm, fmt.Errorf("can't derive previous Cron time for cron spec %s: unexpected type for %v", cronSpec, sched) + } + + // General approach is based on approach to SpecSchedule.Next() implementation + + origLocation := t.Location() + loc := s.Location + if loc == time.Local { + loc = t.Location() + } + if s.Location != time.Local { + t = t.In(s.Location) + } + + // Start at the previous second + t = t.Add(-1*time.Second - time.Duration(t.Nanosecond())*time.Nanosecond) + + // If no time is found within five years, return zero. + yearLimit := t.Year() - 5 + +WRAP: + if t.Year() < yearLimit { + return tm, fmt.Errorf("can't derive previous Cron time for cron spec %s: no time found within %d years", cronSpec, yearLimit) + } + + // Find the first applicable month. + // If it's this month, then do nothing. + for 1< 12 { + t = t.Add(time.Duration(24-t.Hour()) * time.Hour) + } else { + t = t.Add(time.Duration(-t.Hour()) * time.Hour) + } + } + + t = t.Add(-1 * time.Second) + + if saveMonth != t.Month() { + goto WRAP + } + } + + for 1< 0 + dowMatch bool = 1< 0 + ) + if s.Dom&starBit > 0 || s.Dow&starBit > 0 { + return domMatch && dowMatch + } + return domMatch || dowMatch +} diff --git a/common/cronutil_test.go b/common/cronutil_test.go new file mode 100644 index 0000000000..cf89912b5c --- /dev/null +++ b/common/cronutil_test.go @@ -0,0 +1,106 @@ +package common + +import ( + "strings" + "testing" + "time" + + cronlib "github.com/robfig/cron/v3" +) + +func TestPrevCronTime(t *testing.T) { + runs := []struct { + time, spec string + expected string + expectedErr bool + }{ + // Simple cases + {"Mon Jul 9 15:00 2012", "0 0/15 * * * *", "Mon Jul 9 14:45 2012", false}, + {"Mon Jul 9 14:59 2012", "0 0/15 * * * *", "Mon Jul 9 14:45 2012", false}, + {"Mon Jul 9 15:01:59 2012", "0 0/15 * * * *", "Mon Jul 9 15:00 2012", false}, + + // Wrap around hours + {"Mon Jul 9 15:10 2012", "0 20-35/15 * * * *", "Mon Jul 9 14:35 2012", false}, + + // Wrap around days + {"Tue Jul 10 00:00 2012", "0 */15 * * * *", "Tue Jul 9 23:45 2012", false}, + {"Tue Jul 10 00:00 2012", "0 20-35/15 * * * *", "Tue Jul 9 23:35 2012", false}, + + // Wrap around months + {"Mon Jul 9 09:35 2012", "0 0 12 9 Apr-Oct ?", "Sat Jun 9 12:00 2012", false}, + + // Leap year + {"Mon Jul 9 23:35 2018", "0 0 0 29 Feb ?", "Mon Feb 29 00:00 2016", false}, + + // Daylight savings time 3am EDT (-4) -> 2am EST (-5) + {"2013-03-11T02:30:00-0400", "TZ=America/New_York 0 0 12 9 Mar ?", "2013-03-09T12:00:00-0500", false}, + + // hourly job + {"2012-03-11T01:00:00-0500", "TZ=America/New_York 0 0 * * * ?", "2012-03-11T00:00:00-0500", false}, + + // 2am nightly job (skipped) + {"2012-03-12T00:00:00-0400", "TZ=America/New_York 0 0 2 * * ?", "2012-03-10T02:00:00-0500", false}, + + // 2am nightly job + {"2012-11-04T02:00:00-0500", "TZ=America/New_York 0 0 0 * * ?", "2012-11-04T00:00:00-0400", false}, + {"2012-11-05T02:00:00-0500", 
"TZ=America/New_York 0 0 2 * * ?", "2012-11-04T02:00:00-0500", false}, + + // Unsatisfiable + {"Mon Jul 9 23:35 2012", "0 0 0 30 Feb ?", "", true}, + {"Mon Jul 9 23:35 2012", "0 0 0 31 Apr ?", "", true}, + + // Monthly job + {"TZ=America/New_York 2012-12-03T00:00:00-0500", "0 0 3 3 * ?", "2012-11-03T03:00:00-0400", false}, + } + + parser := cronlib.NewParser(cronlib.Second | cronlib.Minute | cronlib.Hour | cronlib.Dom | cronlib.Month | cronlib.DowOptional | cronlib.Descriptor) + + for _, c := range runs { + actual, err := PrevCronTime(c.spec, parser, getTime(c.time)) + if c.expectedErr { + if err == nil { + t.Errorf("%s, \"%s\": should have received error but didn't", c.time, c.spec) + } + } else { + if err != nil { + t.Errorf("%s, \"%s\": error: %v", c.time, c.spec, err) + } else { + expected := getTime(c.expected) + if !actual.Equal(expected) { + t.Errorf("%s, \"%s\": (expected) %v != %v (actual)", c.time, c.spec, expected, actual) + } + } + } + } +} + +func getTime(value string) time.Time { + if value == "" { + return time.Time{} + } + + var location = time.Local + if strings.HasPrefix(value, "TZ=") { + parts := strings.Fields(value) + loc, err := time.LoadLocation(parts[0][len("TZ="):]) + if err != nil { + panic("could not parse location:" + err.Error()) + } + location = loc + value = parts[1] + } + + var layouts = []string{ + "Mon Jan 2 15:04 2006", + "Mon Jan 2 15:04:05 2006", + } + for _, layout := range layouts { + if t, err := time.ParseInLocation(layout, value, location); err == nil { + return t + } + } + if t, err := time.ParseInLocation("2006-01-02T15:04:05-0700", value, location); err == nil { + return t + } + panic("could not parse time value " + value) +} diff --git a/common/expr/eval.go b/common/expr/eval.go new file mode 100644 index 0000000000..a29c78bed1 --- /dev/null +++ b/common/expr/eval.go @@ -0,0 +1,79 @@ +package expr + +import ( + "fmt" + + "sort" + "strings" + + "encoding/json" + + "github.com/antonmedv/expr" + "github.com/doublerebel/bellows" + + sprig "github.com/Masterminds/sprig/v3" + exprpkg "github.com/argoproj/pkg/expr" +) + +func EvalBool(input string, env interface{}) (bool, error) { + result, err := expr.Eval(input, env) + if err != nil { + return false, fmt.Errorf("unable to evaluate expression '%s': %s", input, err) + } + resultBool, ok := result.(bool) + if !ok { + return false, fmt.Errorf("unable to cast expression result '%s' to bool", result) + } + return resultBool, nil +} + +var sprigFuncMap = sprig.GenericFuncMap() // a singleton for better performance + +func init() { + delete(sprigFuncMap, "env") + delete(sprigFuncMap, "expandenv") +} + +func GetFuncMap(m map[string]interface{}) map[string]interface{} { + env := Expand(m) + for k, v := range exprpkg.GetExprEnvFunctionMap() { + env[k] = v + } + env["toJson"] = toJson + env["sprig"] = sprigFuncMap + return env +} + +func toJson(v interface{}) string { + output, err := json.Marshal(v) + if err != nil { + return "" + } + return string(output) +} + +func Expand(m map[string]interface{}) map[string]interface{} { + return bellows.Expand(removeConflicts(m)) +} + +// It is possible for the map to contain conflicts: +// {"a.b": 1, "a": 2} +// What should the result be? We remove the less-specific key. 
+// {"a.b": 1, "a": 2} -> {"a.b": 1, "a": 2} +func removeConflicts(m map[string]interface{}) map[string]interface{} { + var keys []string + n := map[string]interface{}{} + for k, v := range m { + keys = append(keys, k) + n[k] = v + } + sort.Strings(keys) + for i := 0; i < len(keys)-1; i++ { + k := keys[i] + // remove any parent that has a child + if strings.HasPrefix(keys[i+1], k+".") { + delete(n, k) + } + } + return n +} diff --git a/common/expr/eval_test.go b/common/expr/eval_test.go new file mode 100644 index 0000000000..4fcc06db65 --- /dev/null +++ b/common/expr/eval_test.go @@ -0,0 +1,105 @@ +package expr + +import ( + "fmt" + "reflect" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestExpand(t *testing.T) { + for i := 0; i < 1; i++ { // loop 100 times, because map ordering is not determisitic + t.Run(fmt.Sprint(i), func(t *testing.T) { + before := map[string]interface{}{ + "a.b": 1, + "a.c.d": 2, + "a": 3, // should be deleted + "ab": 4, + "abb": 5, // should be kept + } + after := Expand(before) + assert.Len(t, before, 5, "original map unchanged") + assert.Equal(t, map[string]interface{}{ + "a": map[string]interface{}{ + "b": 1, + "c": map[string]interface{}{ + "d": 2, + }, + }, + "ab": 4, + "abb": 5, + }, after) + }) + } +} + +func TestEvalBool(t *testing.T) { + env := map[string]interface{}{ + "id": 1, + "first_name": "John", + "last_name": "Doe", + "email": "johndoe@intuit.com", + "gender": "Male", + "dept": "devp", + "uuid": "test-case-hyphen", + } + + pass, err := EvalBool("(id == 1) && (last_name == 'Doe')", env) + assert.NoError(t, err) + assert.True(t, pass) + + pass, err = EvalBool("(id == 2) || (gender == 'Female')", env) + assert.NoError(t, err) + assert.False(t, pass) + + pass, err = EvalBool("invalidexpression", env) + assert.Error(t, err) + assert.False(t, pass) + + // expr with '-' evaluate the same as others + pass, err = EvalBool("(uuid == 'test-case-hyphen')", env) + assert.NoError(t, err) + assert.True(t, pass) +} + +func TestRemoveConflictingKeys(t *testing.T) { + testCases := []struct { + name string + input map[string]interface{} + output map[string]interface{} + }{ + { + name: "remove conflicting keys", + input: map[string]interface{}{ + "a.b": 1, + "a": 2, + }, + output: map[string]interface{}{ + "a.b": 1, + }, + }, + { + name: "no conflicts", + input: map[string]interface{}{ + "a": 1, + "b": 2, + "c.d": 3, + }, + output: map[string]interface{}{ + "a": 1, + "b": 2, + "c.d": 3, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := removeConflicts(tc.input) + if !reflect.DeepEqual(result, tc.output) { + t.Errorf("expected %v, but got %v", tc.output, result) + } + }) + } +} diff --git a/common/hash.go b/common/hash.go new file mode 100644 index 0000000000..5a0081e6a8 --- /dev/null +++ b/common/hash.go @@ -0,0 +1,21 @@ +package common + +import ( + "crypto/sha256" + "encoding/hex" +) + +func MustHash(v interface{}) string { + switch data := v.(type) { + case []byte: + hash := sha256.New() + if _, err := hash.Write(data); err != nil { + panic(err) + } + return hex.EncodeToString(hash.Sum(nil)) + case string: + return MustHash([]byte(data)) + default: + return MustHash([]byte(MustJSON(v))) + } +} diff --git a/common/hash_test.go b/common/hash_test.go new file mode 100644 index 0000000000..3788c876d7 --- /dev/null +++ b/common/hash_test.go @@ -0,0 +1,17 @@ +package common + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMustHash(t *testing.T) { + assert.Equal(t, 
"ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", MustHash([]byte("abc"))) + assert.Equal(t, "d4ffe8e9ee0b48eba716706123a7187f32eae3bdcb0e7763e41e533267bd8a53", MustHash("efg")) + assert.Equal(t, "a8e084ec42eff43acd61526bef35e33ddf7a8135d6aba3b140a5cae4c8c5e10b", MustHash( + struct { + A string + B string + }{A: "aAa", B: "bBb"})) +} diff --git a/common/json.go b/common/json.go new file mode 100644 index 0000000000..124b5135e5 --- /dev/null +++ b/common/json.go @@ -0,0 +1,27 @@ +package common + +import "encoding/json" + +func MustJSON(in interface{}) string { + if data, err := json.Marshal(in); err != nil { + panic(err) + } else { + return string(data) + } +} + +// MustUnJSON unmarshalls JSON or panics. +// v - must be []byte or string +// in - must be a pointer. +func MustUnJSON(v interface{}, in interface{}) { + switch data := v.(type) { + case []byte: + if err := json.Unmarshal(data, in); err != nil { + panic(err) + } + case string: + MustUnJSON([]byte(data), in) + default: + panic("unknown type") + } +} diff --git a/common/json_test.go b/common/json_test.go new file mode 100644 index 0000000000..05cbef212b --- /dev/null +++ b/common/json_test.go @@ -0,0 +1,17 @@ +package common + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMustJson(t *testing.T) { + assert.Equal(t, "1", MustJSON(1)) +} + +func TestUnJSON(t *testing.T) { + var in int + MustUnJSON("1", &in) + assert.Equal(t, 1, in) +} diff --git a/common/leaderelection/leaderelection.go b/common/leaderelection/leaderelection.go index eb2d8d6180..3027c02887 100644 --- a/common/leaderelection/leaderelection.go +++ b/common/leaderelection/leaderelection.go @@ -2,21 +2,32 @@ package leaderelection import ( "context" + "crypto/tls" + "fmt" + "os" + "strings" + "time" "github.com/fsnotify/fsnotify" "github.com/nats-io/graft" nats "github.com/nats-io/nats.go" - "github.com/pkg/errors" - "github.com/spf13/viper" "go.uber.org/zap" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/leaderelection" + "k8s.io/client-go/tools/leaderelection/resourcelock" "github.com/argoproj/argo-events/common" "github.com/argoproj/argo-events/common/logging" - eventbusdriver "github.com/argoproj/argo-events/eventbus/driver" - apicommon "github.com/argoproj/argo-events/pkg/apis/common" + eventbuscommon "github.com/argoproj/argo-events/eventbus/common" eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" ) +var ( + eventBusAuthFileMountPath = common.EventBusAuthFileMountPath +) + type Elector interface { RunOrDie(context.Context, LeaderCallbacks) } @@ -26,79 +37,102 @@ type LeaderCallbacks struct { OnStoppedLeading func() } -func NewEventBusElector(ctx context.Context, eventBusConfig eventbusv1alpha1.BusConfig, clusterName string, clusterSize int) (Elector, error) { - logger := logging.FromContext(ctx) - var eventBusType apicommon.EventBusType - var eventBusAuth *eventbusv1alpha1.AuthStrategy - if eventBusConfig.NATS != nil { - eventBusType = apicommon.EventBusNATS - eventBusAuth = eventBusConfig.NATS.Auth - } else { - return nil, errors.New("invalid event bus") +func NewElector(ctx context.Context, eventBusConfig eventbusv1alpha1.BusConfig, clusterName string, clusterSize int, namespace string, leasename string, hostname string) (Elector, error) { + switch { + case eventBusConfig.Kafka != nil || strings.ToLower(os.Getenv(common.EnvVarLeaderElection)) == "k8s": + return newKubernetesElector(namespace, leasename, 
hostname) + case eventBusConfig.NATS != nil: + return newEventBusElector(ctx, eventBusConfig.NATS.Auth, clusterName, clusterSize, eventBusConfig.NATS.URL) + case eventBusConfig.JetStream != nil: + if eventBusConfig.JetStream.AccessSecret != nil { + return newEventBusElector(ctx, &eventbusv1alpha1.AuthStrategyBasic, clusterName, clusterSize, eventBusConfig.JetStream.URL) + } else { + return newEventBusElector(ctx, &eventbusv1alpha1.AuthStrategyNone, clusterName, clusterSize, eventBusConfig.JetStream.URL) + } + default: + return nil, fmt.Errorf("invalid event bus") } - var auth *eventbusdriver.Auth - cred := &eventbusdriver.AuthCredential{} - if eventBusAuth == nil || *eventBusAuth == eventbusv1alpha1.AuthStrategyNone { - auth = &eventbusdriver.Auth{ +} + +func newEventBusElector(ctx context.Context, authStrategy *eventbusv1alpha1.AuthStrategy, clusterName string, clusterSize int, url string) (Elector, error) { + auth, err := getEventBusAuth(ctx, authStrategy) + if err != nil { + return nil, err + } + + return &natsEventBusElector{ + clusterName: clusterName, + size: clusterSize, + url: url, + auth: auth, + }, nil +} + +func getEventBusAuth(ctx context.Context, authStrategy *eventbusv1alpha1.AuthStrategy) (*eventbuscommon.Auth, error) { + logger := logging.FromContext(ctx) + + var auth *eventbuscommon.Auth + + if authStrategy == nil || *authStrategy == eventbusv1alpha1.AuthStrategyNone { + auth = &eventbuscommon.Auth{ Strategy: eventbusv1alpha1.AuthStrategyNone, } } else { - v := viper.New() + v := common.ViperWithLogging() v.SetConfigName("auth") v.SetConfigType("yaml") - v.AddConfigPath(common.EventBusAuthFileMountPath) - err := v.ReadInConfig() - if err != nil { - return nil, errors.Errorf("failed to load auth.yaml. err: %+v", err) + v.AddConfigPath(eventBusAuthFileMountPath) + + if err := v.ReadInConfig(); err != nil { + return nil, fmt.Errorf("failed to load auth.yaml. err: %w", err) } - err = v.Unmarshal(cred) - if err != nil { + + cred := &eventbuscommon.AuthCredential{} + if err := v.Unmarshal(cred); err != nil { logger.Errorw("failed to unmarshal auth.yaml", zap.Error(err)) return nil, err } + v.WatchConfig() v.OnConfigChange(func(e fsnotify.Event) { - logger.Info("eventbus auth config file changed.") - err = v.Unmarshal(cred) - if err != nil { - logger.Errorw("failed to unmarshal auth.yaml after reloading", zap.Error(err)) - } + // Auth file changed, let it restart. 
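+			// Rather than re-reading the credentials in place, the process exits
+			// and relies on being restarted (e.g. by Kubernetes) so the updated
+			// auth file is loaded on startup.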
+ logger.Fatal("Eventbus auth config file changed, exiting..") }) - auth = &eventbusdriver.Auth{ - Strategy: *eventBusAuth, - Crendential: cred, - } - } - var elector Elector - switch eventBusType { - case apicommon.EventBusNATS: - elector = &natsEventBusElector{ - clusterName: clusterName, - size: clusterSize, - url: eventBusConfig.NATS.URL, - auth: auth, + + auth = &eventbuscommon.Auth{ + Strategy: *authStrategy, + Credential: cred, } - default: - return nil, errors.New("invalid eventbus type") } - return elector, nil + + return auth, nil } type natsEventBusElector struct { clusterName string size int url string - auth *eventbusdriver.Auth + auth *eventbuscommon.Auth } func (e *natsEventBusElector) RunOrDie(ctx context.Context, callbacks LeaderCallbacks) { log := logging.FromContext(ctx) ci := graft.ClusterInfo{Name: e.clusterName, Size: e.size} opts := &nats.DefaultOptions + // Will never give up + opts.MaxReconnect = -1 opts.Url = e.url if e.auth.Strategy == eventbusv1alpha1.AuthStrategyToken { - opts.Token = e.auth.Crendential.Token + opts.Token = e.auth.Credential.Token + } else if e.auth.Strategy == eventbusv1alpha1.AuthStrategyBasic { + opts.User = e.auth.Credential.Username + opts.Password = e.auth.Credential.Password + } + + opts.TLSConfig = &tls.Config{ // seems fine to pass this in even when we're not using TLS + InsecureSkipVerify: true, } + rpc, err := graft.NewNatsRpc(opts) if err != nil { log.Fatalw("failed to new Nats Rpc", zap.Error(err)) @@ -157,3 +191,67 @@ func (e *natsEventBusElector) RunOrDie(ctx context.Context, callbacks LeaderCall } } } + +type kubernetesElector struct { + namespace string + leasename string + hostname string +} + +func newKubernetesElector(namespace string, leasename string, hostname string) (Elector, error) { + return &kubernetesElector{ + namespace: namespace, + leasename: leasename, + hostname: hostname, + }, nil +} + +func (e *kubernetesElector) RunOrDie(ctx context.Context, callbacks LeaderCallbacks) { + logger := logging.FromContext(ctx) + + config, err := rest.InClusterConfig() + if err != nil { + logger.Fatalw("Failed to retrieve kubernetes config", zap.Error(err)) + } + + client, err := kubernetes.NewForConfig(config) + if err != nil { + logger.Fatalw("Failed to create kubernetes client", zap.Error(err)) + } + + lock := &resourcelock.LeaseLock{ + LeaseMeta: metav1.ObjectMeta{ + Name: e.leasename, + Namespace: e.namespace, + }, + Client: client.CoordinationV1(), + LockConfig: resourcelock.ResourceLockConfig{ + Identity: e.hostname, + }, + } + + for { + select { + case <-ctx.Done(): + return + default: + ctx, cancel := context.WithCancel(ctx) + leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{ + Lock: lock, + ReleaseOnCancel: true, + LeaseDuration: 5 * time.Second, + RenewDeadline: 2 * time.Second, + RetryPeriod: 1 * time.Second, + Callbacks: leaderelection.LeaderCallbacks{ + OnStartedLeading: callbacks.OnStartedLeading, + OnStoppedLeading: callbacks.OnStoppedLeading, + }, + }) + + // When the leader is lost, leaderelection.RunOrDie will + // cease blocking and we will cancel the context. This + // will halt all eventsource/sensor go routines. 
+ cancel() + } + } +} diff --git a/common/leaderelection/leaderelection_test.go b/common/leaderelection/leaderelection_test.go new file mode 100644 index 0000000000..7a0de21d6c --- /dev/null +++ b/common/leaderelection/leaderelection_test.go @@ -0,0 +1,53 @@ +package leaderelection + +import ( + "context" + "os" + "testing" + + "github.com/argoproj/argo-events/common" + eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" +) + +var ( + configs = []eventbusv1alpha1.BusConfig{ + {NATS: &eventbusv1alpha1.NATSConfig{}}, + {JetStream: &eventbusv1alpha1.JetStreamConfig{}}, + {JetStream: &eventbusv1alpha1.JetStreamConfig{AccessSecret: &v1.SecretKeySelector{}}}, + } +) + +func TestLeaderElectionWithInvalidEventBus(t *testing.T) { + elector, err := NewElector(context.TODO(), eventbusv1alpha1.BusConfig{}, "", 0, "", "", "") + + assert.Nil(t, elector) + assert.EqualError(t, err, "invalid event bus") +} + +func TestLeaderElectionWithEventBusElector(t *testing.T) { + eventBusAuthFileMountPath = "test" + + for _, config := range configs { + elector, err := NewElector(context.TODO(), config, "", 0, "", "", "") + assert.Nil(t, err) + + _, ok := elector.(*natsEventBusElector) + assert.True(t, ok) + } +} + +func TestLeaderElectionWithKubernetesElector(t *testing.T) { + eventBusAuthFileMountPath = "test" + + os.Setenv(common.EnvVarLeaderElection, "k8s") + + for _, config := range configs { + elector, err := NewElector(context.TODO(), config, "", 0, "", "", "") + assert.Nil(t, err) + + _, ok := elector.(*kubernetesElector) + assert.True(t, ok) + } +} diff --git a/common/leaderelection/test/auth.yaml b/common/leaderelection/test/auth.yaml new file mode 100644 index 0000000000..b3345129cd --- /dev/null +++ b/common/leaderelection/test/auth.yaml @@ -0,0 +1,3 @@ +token: "token" +username: "username" +password: "password" diff --git a/common/logging/logger.go b/common/logging/logger.go index 1be27508bb..9a059f3bd8 100644 --- a/common/logging/logger.go +++ b/common/logging/logger.go @@ -18,9 +18,12 @@ package logging import ( "context" + "flag" "os" + "strconv" zap "go.uber.org/zap" + "k8s.io/klog/v2" "github.com/argoproj/argo-events/common" ) @@ -58,6 +61,11 @@ func NewArgoEventsLogger() *zap.SugaredLogger { return logger.Named("argo-events").Sugar() } +func SetKlogLevel(level int) { + klog.InitFlags(nil) + _ = flag.Set("v", strconv.Itoa(level)) +} + type loggerKey struct{} // WithLogger returns a copy of parent context in which the diff --git a/common/retry.go b/common/retry.go index fc724cf8c4..f7284fa461 100644 --- a/common/retry.go +++ b/common/retry.go @@ -20,7 +20,6 @@ import ( "fmt" "time" - "github.com/pkg/errors" apierr "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/util/wait" @@ -73,7 +72,7 @@ func Convert2WaitBackoff(backoff *apicommon.Backoff) (*wait.Backoff, error) { } f, err := factor.Float64() if err != nil { - return nil, errors.Wrap(err, "invalid factor") + return nil, fmt.Errorf("invalid factor, %w", err) } result.Factor = f @@ -83,7 +82,7 @@ func Convert2WaitBackoff(backoff *apicommon.Backoff) (*wait.Backoff, error) { } j, err := jitter.Float64() if err != nil { - return nil, errors.Wrap(err, "invalid jitter") + return nil, fmt.Errorf("invalid jitter, %w", err) } result.Jitter = j @@ -95,26 +94,22 @@ func Convert2WaitBackoff(backoff *apicommon.Backoff) (*wait.Backoff, error) { return &result, nil } -func Connect(backoff *apicommon.Backoff, conn func() error) error { +func DoWithRetry(backoff 
*apicommon.Backoff, f func() error) error {
 	if backoff == nil {
 		backoff = &DefaultBackoff
 	}
 	b, err := Convert2WaitBackoff(backoff)
 	if err != nil {
-		return errors.Wrap(err, "invalid backoff configuration")
+		return fmt.Errorf("invalid backoff configuration, %w", err)
 	}
-	if waitErr := wait.ExponentialBackoff(*b, func() (bool, error) {
-		if err = conn(); err != nil {
-			// return "false, err" will cover waitErr
+	_ = wait.ExponentialBackoff(*b, func() (bool, error) {
+		if err = f(); err != nil {
 			return false, nil
 		}
 		return true, nil
-	}); waitErr != nil {
-		if err != nil {
-			return fmt.Errorf("%v: %v", waitErr, err)
-		} else {
-			return waitErr
-		}
+	})
+	if err != nil {
+		return fmt.Errorf("failed after retries: %w", err)
 	}
 	return nil
 }
diff --git a/common/retry_test.go b/common/retry_test.go
index b35af7f0c6..05abc81995 100644
--- a/common/retry_test.go
+++ b/common/retry_test.go
@@ -24,6 +24,7 @@ import (
 	"github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"
 	"github.com/stretchr/testify/assert"
 	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/util/wait"
 	apicommon "github.com/argoproj/argo-events/pkg/apis/common"
 )
@@ -43,13 +44,13 @@ func TestRetryableKubeAPIError(t *testing.T) {
 }
 
 func TestConnect(t *testing.T) {
-	err := Connect(nil, func() error {
+	err := DoWithRetry(nil, func() error {
 		return fmt.Errorf("new error")
 	})
 	assert.NotNil(t, err)
 	assert.True(t, strings.Contains(err.Error(), "new error"))
 
-	err = Connect(nil, func() error {
+	err = DoWithRetry(nil, func() error {
 		return nil
 	})
 	assert.Nil(t, err)
@@ -58,7 +59,7 @@ func TestConnectDurationString(t *testing.T) {
 	start := time.Now()
 	count := 2
-	err := Connect(nil, func() error {
+	err := DoWithRetry(nil, func() error {
 		if count == 0 {
 			return nil
 		} else {
@@ -85,7 +86,7 @@ func TestConnectRetry(t *testing.T) {
 	}
 	count := 2
 	start := time.Now()
-	err := Connect(&backoff, func() error {
+	err := DoWithRetry(&backoff, func() error {
 		if count == 0 {
 			return nil
 		} else {
@@ -99,3 +100,41 @@
 	assert.Equal(t, 0, count)
 	assert.True(t, elapsed >= 2*time.Second)
 }
+
+func TestRetryFailure(t *testing.T) {
+	factor := apicommon.NewAmount("1.0")
+	jitter := apicommon.NewAmount("1")
+	duration := apicommon.FromString("1s")
+	backoff := apicommon.Backoff{
+		Duration: &duration,
+		Factor:   &factor,
+		Jitter:   &jitter,
+		Steps:    2,
+	}
+	err := DoWithRetry(&backoff, func() error {
+		return fmt.Errorf("this is an error")
+	})
+	assert.NotNil(t, err)
+	assert.Contains(t, err.Error(), "after retries")
+	assert.Contains(t, err.Error(), "this is an error")
+}
+
+func TestConvert2WaitBackoff(t *testing.T) {
+	factor := apicommon.NewAmount("1.0")
+	jitter := apicommon.NewAmount("1")
+	duration := apicommon.FromString("1s")
+	backoff := apicommon.Backoff{
+		Duration: &duration,
+		Factor:   &factor,
+		Jitter:   &jitter,
+		Steps:    2,
+	}
+	waitBackoff, err := Convert2WaitBackoff(&backoff)
+	assert.NoError(t, err)
+	assert.Equal(t, wait.Backoff{
+		Duration: 1 * time.Second,
+		Factor:   1.0,
+		Jitter:   1.0,
+		Steps:    2,
+	}, *waitBackoff)
+}
diff --git a/common/saramaconfig.go b/common/saramaconfig.go
new file mode 100644
index 0000000000..f9bf4836b1
--- /dev/null
+++ b/common/saramaconfig.go
@@ -0,0 +1,28 @@
+package common
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/IBM/sarama"
+	"github.com/spf13/viper"
+)
+
+// GetSaramaConfigFromYAMLString parses a YAML string into a sarama.Config.
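+// For example (mirroring the tests below), a YAML string like
+//
+//	producer:
+//	  maxMessageBytes: 800
+//
+// yields a config whose Producer.MaxMessageBytes is 800, with sarama defaults
+// (plus Producer.Return.Successes=true) everywhere else.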
+// Note: All the time.Duration config can not be correctly decoded because it does not implement the decode function. +func GetSaramaConfigFromYAMLString(yaml string) (*sarama.Config, error) { + v := viper.New() + v.SetConfigType("yaml") + if err := v.ReadConfig(bytes.NewBufferString(yaml)); err != nil { + return nil, err + } + cfg := sarama.NewConfig() + cfg.Producer.Return.Successes = true + if err := v.Unmarshal(cfg); err != nil { + return nil, fmt.Errorf("unable to decode into struct, %w", err) + } + if err := cfg.Validate(); err != nil { + return nil, fmt.Errorf("failed validating sarama config, %w", err) + } + return cfg, nil +} diff --git a/common/saramaconfig_test.go b/common/saramaconfig_test.go new file mode 100644 index 0000000000..92cfec0bcd --- /dev/null +++ b/common/saramaconfig_test.go @@ -0,0 +1,43 @@ +package common + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGetSaramaConfigFromYAMLString(t *testing.T) { + t.Run("YAML Config", func(t *testing.T) { + var yamlExample = string(` +admin: + retry: + max: 105 +producer: + maxMessageBytes: 800 +consumer: + fetch: + min: 2 +net: + MaxOpenRequests: 5 +`) + conf, err := GetSaramaConfigFromYAMLString(yamlExample) + assert.NoError(t, err) + assert.Equal(t, 800, conf.Producer.MaxMessageBytes) + assert.Equal(t, 105, conf.Admin.Retry.Max) + assert.Equal(t, int32(2), conf.Consumer.Fetch.Min) + assert.Equal(t, 5, conf.Net.MaxOpenRequests) + }) + t.Run("Empty config", func(t *testing.T) { + conf, err := GetSaramaConfigFromYAMLString("") + assert.NoError(t, err) + assert.Equal(t, 1000000, conf.Producer.MaxMessageBytes) + assert.Equal(t, 5, conf.Admin.Retry.Max) + assert.Equal(t, int32(1), conf.Consumer.Fetch.Min) + assert.Equal(t, 5, conf.Net.MaxOpenRequests) + }) + + t.Run("NON yaml config", func(t *testing.T) { + _, err := GetSaramaConfigFromYAMLString("welcome") + assert.Error(t, err) + }) +} diff --git a/common/scram_client.go b/common/scram_client.go new file mode 100644 index 0000000000..a09f07feb3 --- /dev/null +++ b/common/scram_client.go @@ -0,0 +1,37 @@ +package common + +import ( + "crypto/sha256" + "crypto/sha512" + + "github.com/xdg-go/scram" +) + +var ( + SHA256New scram.HashGeneratorFcn = sha256.New + SHA512New scram.HashGeneratorFcn = sha512.New +) + +type XDGSCRAMClient struct { + *scram.Client + *scram.ClientConversation + scram.HashGeneratorFcn +} + +func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) { + x.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID) + if err != nil { + return err + } + x.ClientConversation = x.Client.NewConversation() + return nil +} + +func (x *XDGSCRAMClient) Step(challenge string) (response string, err error) { + response, err = x.ClientConversation.Step(challenge) + return +} + +func (x *XDGSCRAMClient) Done() bool { + return x.ClientConversation.Done() +} diff --git a/common/string.go b/common/string.go new file mode 100644 index 0000000000..190f3e7ed4 --- /dev/null +++ b/common/string.go @@ -0,0 +1,17 @@ +package common + +import ( + "crypto/rand" + "math/big" +) + +// generate a random string with given length +func RandomString(length int) string { + seeds := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + result := make([]byte, length) + for i := 0; i < length; i++ { + num, _ := rand.Int(rand.Reader, big.NewInt(int64(len(seeds)))) + result[i] = seeds[num.Int64()] + } + return string(result) +} diff --git a/common/string_keyed_map.go b/common/string_keyed_map.go new file mode 100644 index 
0000000000..1c230b0b15 --- /dev/null +++ b/common/string_keyed_map.go @@ -0,0 +1,35 @@ +package common + +import "sync" + +// Concurrent Safe String keyed map +type StringKeyedMap[T any] struct { + items map[string]T + lock *sync.RWMutex +} + +func NewStringKeyedMap[T any]() StringKeyedMap[T] { + return StringKeyedMap[T]{ + items: make(map[string]T, 0), + lock: &sync.RWMutex{}, + } +} + +func (sm *StringKeyedMap[T]) Store(key string, item T) { + sm.lock.Lock() + defer sm.lock.Unlock() + sm.items[key] = item +} + +func (sm *StringKeyedMap[T]) Load(key string) (T, bool) { + sm.lock.RLock() + defer sm.lock.RUnlock() + item, ok := sm.items[key] + return item, ok +} + +func (sm *StringKeyedMap[T]) Delete(key string) { + sm.lock.Lock() + defer sm.lock.Unlock() + delete(sm.items, key) +} diff --git a/common/string_test.go b/common/string_test.go new file mode 100644 index 0000000000..3bdabcc8ff --- /dev/null +++ b/common/string_test.go @@ -0,0 +1,12 @@ +package common + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRandomString(t *testing.T) { + str := RandomString(20) + assert.Equal(t, 20, len(str)) +} diff --git a/common/tls/tls.go b/common/tls/tls.go index 33d4c9a747..5312c380d0 100644 --- a/common/tls/tls.go +++ b/common/tls/tls.go @@ -6,17 +6,16 @@ import ( "crypto/x509" "crypto/x509/pkix" "encoding/pem" + "fmt" "math/big" "time" - - "github.com/pkg/errors" ) func certTemplate(org string, hosts []string, notAfter time.Time) (*x509.Certificate, error) { serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) if err != nil { - return nil, errors.Wrap(err, "failed to generate serial number") + return nil, fmt.Errorf("failed to generate serial number, %w", err) } return &x509.Certificate{ SerialNumber: serialNumber, @@ -42,16 +41,6 @@ func createCACertTemplate(org string, hosts []string, notAfter time.Time) (*x509 return rootCert, nil } -func createServerCertTemplate(org string, hosts []string, notAfter time.Time) (*x509.Certificate, error) { - serverCert, err := certTemplate(org, hosts, notAfter) - if err != nil { - return nil, err - } - serverCert.KeyUsage = x509.KeyUsageDigitalSignature - serverCert.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth} - return serverCert, err -} - // Sign the cert func createCert(template, parent *x509.Certificate, pub, parentPriv interface{}) ( cert *x509.Certificate, certPEM []byte, err error) { @@ -71,24 +60,30 @@ func createCert(template, parent *x509.Certificate, pub, parentPriv interface{}) func createCA(org string, hosts []string, notAfter time.Time) (*rsa.PrivateKey, *x509.Certificate, []byte, error) { rootKey, err := rsa.GenerateKey(rand.Reader, 2048) if err != nil { - return nil, nil, nil, errors.Wrap(err, "failed to generate random key") + return nil, nil, nil, fmt.Errorf("failed to generate random key, %w", err) } rootCertTmpl, err := createCACertTemplate(org, hosts, notAfter) if err != nil { - return nil, nil, nil, errors.Wrap(err, "failed to generate CA cert") + return nil, nil, nil, fmt.Errorf("failed to generate CA cert, %w", err) } rootCert, rootCertPEM, err := createCert(rootCertTmpl, rootCertTmpl, &rootKey.PublicKey, rootKey) if err != nil { - return nil, nil, nil, errors.Wrap(err, "failed to sign CA cert") + return nil, nil, nil, fmt.Errorf("failed to sign CA cert, %w", err) } return rootKey, rootCert, rootCertPEM, nil } -// CreateCerts creates and returns a CA certificate and certificate and -// key for the server -func CreateCerts(org 
string, hosts []string, notAfter time.Time) (serverKey, serverCert, caCert []byte, err error) { +// CreateCerts creates and returns a CA certificate and certificate and key +// if server==true, generate these for a server +// if client==true, generate these for a client +// can generate for both server and client but at least one must be specified +func CreateCerts(org string, hosts []string, notAfter time.Time, server bool, client bool) (serverKey, serverCert, caCert []byte, err error) { + if !server && !client { + return nil, nil, nil, fmt.Errorf("CreateCerts() must specify either server or client") + } + // Create a CA certificate and private key caKey, caCertificate, caCertificatePEM, err := createCA(org, hosts, notAfter) if err != nil { @@ -96,22 +91,31 @@ func CreateCerts(org string, hosts []string, notAfter time.Time) (serverKey, ser } // Create the private key - servKey, err := rsa.GenerateKey(rand.Reader, 2048) + privateKey, err := rsa.GenerateKey(rand.Reader, 2048) if err != nil { - return nil, nil, nil, errors.Wrap(err, "failed to generate random key") + return nil, nil, nil, fmt.Errorf("failed to generate random key, %w", err) } - servCertTemplate, err := createServerCertTemplate(org, hosts, notAfter) + var cert *x509.Certificate + + cert, err = certTemplate(org, hosts, notAfter) if err != nil { - return nil, nil, nil, errors.Wrap(err, "failed to create server cert template") + return nil, nil, nil, err + } + cert.KeyUsage = x509.KeyUsageDigitalSignature + if server { + cert.ExtKeyUsage = append(cert.ExtKeyUsage, x509.ExtKeyUsageServerAuth) + } + if client { + cert.ExtKeyUsage = append(cert.ExtKeyUsage, x509.ExtKeyUsageClientAuth) } // create a certificate wrapping the public key, sign it with the CA private key - _, servCertPEM, err := createCert(servCertTemplate, caCertificate, &servKey.PublicKey, caKey) + _, certPEM, err := createCert(cert, caCertificate, &privateKey.PublicKey, caKey) if err != nil { - return nil, nil, nil, errors.Wrap(err, "failed to sign server cert") + return nil, nil, nil, fmt.Errorf("failed to sign server cert, %w", err) } - servKeyPEM := pem.EncodeToMemory(&pem.Block{ - Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(servKey), + privateKeyPEM := pem.EncodeToMemory(&pem.Block{ + Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(privateKey), }) - return servKeyPEM, servCertPEM, caCertificatePEM, nil + return privateKeyPEM, certPEM, caCertificatePEM, nil } diff --git a/common/tls/tls_test.go b/common/tls/tls_test.go index 5f3ee1183a..f2b063c455 100644 --- a/common/tls/tls_test.go +++ b/common/tls/tls_test.go @@ -3,16 +3,16 @@ package tls import ( "crypto/x509" "encoding/pem" + "fmt" "testing" "time" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" ) func TestCreateCerts(t *testing.T) { t.Run("test create certs", func(t *testing.T) { - sKey, serverCertPEM, caCertBytes, err := CreateCerts("test-org", []string{"test-host"}, time.Now().AddDate(1, 0, 0)) + sKey, serverCertPEM, caCertBytes, err := CreateCerts("test-org", []string{"test-host"}, time.Now().AddDate(1, 0, 0), true, false) assert.NoError(t, err) p, _ := pem.Decode(sKey) assert.Equal(t, "RSA PRIVATE KEY", p.Type) @@ -35,14 +35,14 @@ func validCertificate(cert []byte, t *testing.T) (*x509.Certificate, error) { const certificate = "CERTIFICATE" caCert, _ := pem.Decode(cert) if caCert.Type != certificate { - return nil, errors.Errorf("CERT type mismatch, got %s, want: %s", caCert.Type, certificate) + return nil, fmt.Errorf("CERT type mismatch, got %s, want: %s", 
caCert.Type, certificate) } parsedCert, err := x509.ParseCertificate(caCert.Bytes) if err != nil { - return nil, errors.Wrap(err, "failed to parse cert") + return nil, fmt.Errorf("failed to parse cert, %w", err) } if parsedCert.SignatureAlgorithm != x509.SHA256WithRSA { - return nil, errors.Errorf("signature not match. Got: %s, want: %s", parsedCert.SignatureAlgorithm, x509.SHA256WithRSA) + return nil, fmt.Errorf("signature not match. Got: %s, want: %s", parsedCert.SignatureAlgorithm, x509.SHA256WithRSA) } return parsedCert, nil } diff --git a/common/util.go b/common/util.go index 32fc84c780..a4f1d9e37e 100644 --- a/common/util.go +++ b/common/util.go @@ -1,9 +1,12 @@ /* Copyright 2018 BlackRock, Inc. + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -20,7 +23,6 @@ import ( "encoding/json" "fmt" "hash/fnv" - "io/ioutil" "net/http" "os" "reflect" @@ -130,7 +132,7 @@ func GetSecretValue(ctx context.Context, client kubernetes.Interface, namespace } val, ok := secret.Data[selector.Key] if !ok { - return "", errors.Errorf("secret '%s' does not have the key '%s'", selector.Name, selector.Key) + return "", fmt.Errorf("secret '%s' does not have the key '%s'", selector.Name, selector.Key) } return string(val), nil } @@ -160,19 +162,19 @@ func GetSecretFromVolume(selector *v1.SecretKeySelector) (string, error) { if err != nil { return "", err } - data, err := ioutil.ReadFile(filePath) + data, err := os.ReadFile(filePath) if err != nil { - return "", errors.Wrapf(err, "failed to get secret value of name: %s, key: %s", selector.Name, selector.Key) + return "", fmt.Errorf("failed to get secret value of name: %s, key: %s, %w", selector.Name, selector.Key, err) } - // Secrets edied by tools like "vim" always have an extra invisible "\n" in the end, - // and it's often negleted, but it makes differences for some of the applications. + // Secrets edited by tools like "vim" always have an extra invisible "\n" in the end, + // and it's often neglected, but it makes differences for some of the applications. return strings.TrimSuffix(string(data), "\n"), nil } // GetSecretVolumePath returns the path of the mounted secret func GetSecretVolumePath(selector *v1.SecretKeySelector) (string, error) { if selector == nil { - return "", errors.New("secret key selector is nil") + return "", fmt.Errorf("secret key selector is nil") } return fmt.Sprintf("/argo-events/secrets/%s/%s", selector.Name, selector.Key), nil } @@ -184,9 +186,9 @@ func GetConfigMapFromVolume(selector *v1.ConfigMapKeySelector) (string, error) { if err != nil { return "", err } - data, err := ioutil.ReadFile(filePath) + data, err := os.ReadFile(filePath) if err != nil { - return "", errors.Wrapf(err, "failed to get configMap value of name: %s, key: %s", selector.Name, selector.Key) + return "", fmt.Errorf("failed to get configMap value of name: %s, key: %s, %w", selector.Name, selector.Key, err) } // Contents edied by tools like "vim" always have an extra invisible "\n" in the end, // and it's often negleted, but it makes differences for some of the applications. 
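	// For example, a mounted file whose content is "value\n" is returned
	// as just "value".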
@@ -196,7 +198,7 @@ func GetConfigMapFromVolume(selector *v1.ConfigMapKeySelector) (string, error) { // GetConfigMapVolumePath returns the path of the mounted configmap func GetConfigMapVolumePath(selector *v1.ConfigMapKeySelector) (string, error) { if selector == nil { - return "", errors.New("configmap key selector is nil") + return "", fmt.Errorf("configmap key selector is nil") } return fmt.Sprintf("/argo-events/config/%s/%s", selector.Name, selector.Key), nil } @@ -231,10 +233,18 @@ func GenerateEnvFromConfigMapSpec(selector *v1.ConfigMapKeySelector) v1.EnvFromS } } -// GetTLSConfig returns a tls configuration for given cert and key. +// GetTLSConfig returns a tls configuration for given cert and key or skips the certs if InsecureSkipVerify is true. func GetTLSConfig(config *apicommon.TLSConfig) (*tls.Config, error) { if config == nil { - return nil, errors.New("TLSConfig is nil") + return nil, fmt.Errorf("TLSConfig is nil") + } + + if config.InsecureSkipVerify { + tlsConfig := &tls.Config{ + InsecureSkipVerify: true, + ClientAuth: 0, + } + return tlsConfig, nil } var caCertPath, clientCertPath, clientKeyPath string @@ -244,9 +254,6 @@ func GetTLSConfig(config *apicommon.TLSConfig) (*tls.Config, error) { if err != nil { return nil, err } - } else if config.DeprecatedCACertPath != "" { - // DEPRECATED. - caCertPath = config.DeprecatedCACertPath } if config.ClientCertSecret != nil { @@ -254,9 +261,6 @@ func GetTLSConfig(config *apicommon.TLSConfig) (*tls.Config, error) { if err != nil { return nil, err } - } else if config.DeprecatedClientCertPath != "" { - // DEPRECATED. - clientCertPath = config.DeprecatedClientCertPath } if config.ClientKeySecret != nil { @@ -264,26 +268,23 @@ func GetTLSConfig(config *apicommon.TLSConfig) (*tls.Config, error) { if err != nil { return nil, err } - } else if config.DeprecatedClientKeyPath != "" { - // DEPRECATED. 
- clientKeyPath = config.DeprecatedClientKeyPath } if len(caCertPath)+len(clientCertPath)+len(clientKeyPath) == 0 { // None of 3 is configured - return nil, errors.New("invalid tls config, neither of caCertSecret, clientCertSecret and clientKeySecret is configured") + return nil, fmt.Errorf("invalid tls config, neither of caCertSecret, clientCertSecret and clientKeySecret is configured") } if len(clientCertPath)+len(clientKeyPath) > 0 && len(clientCertPath)*len(clientKeyPath) == 0 { // Only one of clientCertSecret and clientKeySecret is configured - return nil, errors.New("invalid tls config, both of clientCertSecret and clientKeySecret need to be configured") + return nil, fmt.Errorf("invalid tls config, both of clientCertSecret and clientKeySecret need to be configured") } c := &tls.Config{} if len(caCertPath) > 0 { - caCert, err := ioutil.ReadFile(caCertPath) + caCert, err := os.ReadFile(caCertPath) if err != nil { - return nil, errors.Wrapf(err, "failed to read ca cert file %s", caCertPath) + return nil, fmt.Errorf("failed to read ca cert file %s, %w", caCertPath, err) } pool := x509.NewCertPool() pool.AppendCertsFromPEM(caCert) @@ -293,7 +294,7 @@ func GetTLSConfig(config *apicommon.TLSConfig) (*tls.Config, error) { if len(clientCertPath) > 0 && len(clientKeyPath) > 0 { clientCert, err := tls.LoadX509KeyPair(clientCertPath, clientKeyPath) if err != nil { - return nil, errors.Wrapf(err, "failed to load client cert key pair %s", caCertPath) + return nil, fmt.Errorf("failed to load client cert key pair %s, %w", caCertPath, err) } c.Certificates = []tls.Certificate{clientCert} } @@ -302,13 +303,18 @@ func GetTLSConfig(config *apicommon.TLSConfig) (*tls.Config, error) { // VolumesFromSecretsOrConfigMaps builds volumes and volumeMounts spec based on // the obj and its children's secretKeyselector or configMapKeySelector -func VolumesFromSecretsOrConfigMaps(obj interface{}, t reflect.Type) ([]v1.Volume, []v1.VolumeMount) { +func VolumesFromSecretsOrConfigMaps(t reflect.Type, objs ...interface{}) ([]v1.Volume, []v1.VolumeMount) { resultVolumes := []v1.Volume{} resultMounts := []v1.VolumeMount{} - values := findTypeValues(obj, t) + values := []interface{}{} + + for _, obj := range objs { + values = append(values, findTypeValues(obj, t)...) + } if len(values) == 0 { return resultVolumes, resultMounts } + switch t { case SecretKeySelectorType: for _, v := range values { @@ -435,3 +441,72 @@ func uniqueVolumeMounts(mounts []v1.VolumeMount) []v1.VolumeMount { } return rMounts } + +// ElementsMatch returns true if the two provided string slices contain the same elements while avoiding duplications. +// WARN: this method avoids duplications. 
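+// For example, ElementsMatch([]string{"a", "b", "a"}, []string{"b", "a"}) is
+// true, while ElementsMatch([]string{"a"}, []string{"a", "b"}) is false.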
+func ElementsMatch(first []string, second []string) bool { + if len(first) == 0 && len(second) == 0 { + return true + } + if len(first) == 0 || len(second) == 0 { + return false + } + + diff := make(map[string]int) + for _, str := range first { + diff[str] = 1 + } + + for _, str := range second { + if _, ok := diff[str]; !ok { + return false + } else { + diff[str] = 2 + } + } + + for _, v := range diff { + // 1: only exists in first + // 2: exists in both + if v < 2 { + return false + } + } + return true +} + +// SliceContains checks if a string slice contains a specific string +func SliceContains(strSlice []string, targetStr string) bool { + for _, curr := range strSlice { + if curr == targetStr { + return true + } + } + return false +} + +func GetImagePullPolicy() v1.PullPolicy { + imgPullPolicy := v1.PullAlways + if x := os.Getenv(EnvImagePullPolicy); x != "" { + imgPullPolicy = v1.PullPolicy(x) + } + return imgPullPolicy +} + +func StructToMap(obj interface{}, output map[string]interface{}) error { + data, err := json.Marshal(obj) // Convert to a json string + if err != nil { + return err + } + + return json.Unmarshal(data, &output) // Convert to a map +} + +func CopyStringMap(originalMap map[string]string) map[string]string { + newMap := make(map[string]string) + for k, v := range originalMap { + newMap[k] = v + } + + return newMap +} diff --git a/common/util_test.go b/common/util_test.go index b599525101..1725286612 100644 --- a/common/util_test.go +++ b/common/util_test.go @@ -23,6 +23,7 @@ import ( apicommon "github.com/argoproj/argo-events/pkg/apis/common" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" ) @@ -162,39 +163,45 @@ func TestFindTypeValues(t *testing.T) { func TestVolumesFromSecretsOrConfigMaps(t *testing.T) { t.Run("test secret volumes", func(t *testing.T) { - vols, mounts := VolumesFromSecretsOrConfigMaps(&testXObj, SecretKeySelectorType) + vols, mounts := VolumesFromSecretsOrConfigMaps(SecretKeySelectorType, &testXObj) assert.Equal(t, len(vols), 6) assert.Equal(t, len(mounts), 6) }) t.Run("test configmap volumes", func(t *testing.T) { - vols, mounts := VolumesFromSecretsOrConfigMaps(&testXObj, ConfigMapKeySelectorType) + vols, mounts := VolumesFromSecretsOrConfigMaps(ConfigMapKeySelectorType, &testXObj) assert.Equal(t, len(vols), 6) assert.Equal(t, len(mounts), 6) }) } -func fakeTLSConfig(t *testing.T) *apicommon.TLSConfig { +func fakeTLSConfig(t *testing.T, insecureSkipVerify bool) *apicommon.TLSConfig { t.Helper() - return &apicommon.TLSConfig{ - CACertSecret: &corev1.SecretKeySelector{ - Key: "fake-key1", - LocalObjectReference: corev1.LocalObjectReference{ - Name: "fake-name1", + if insecureSkipVerify == true { + return &apicommon.TLSConfig{ + InsecureSkipVerify: true, + } + } else { + return &apicommon.TLSConfig{ + CACertSecret: &corev1.SecretKeySelector{ + Key: "fake-key1", + LocalObjectReference: corev1.LocalObjectReference{ + Name: "fake-name1", + }, }, - }, - ClientCertSecret: &corev1.SecretKeySelector{ - Key: "fake-key2", - LocalObjectReference: corev1.LocalObjectReference{ - Name: "fake-name2", + ClientCertSecret: &corev1.SecretKeySelector{ + Key: "fake-key2", + LocalObjectReference: corev1.LocalObjectReference{ + Name: "fake-name2", + }, }, - }, - ClientKeySecret: &corev1.SecretKeySelector{ - Key: "fake-key3", - LocalObjectReference: corev1.LocalObjectReference{ - Name: "fake-name3", + ClientKeySecret: &corev1.SecretKeySelector{ + Key: "fake-key3", + LocalObjectReference: 
corev1.LocalObjectReference{ + Name: "fake-name3", + }, }, - }, + } } } @@ -207,7 +214,7 @@ func TestGetTLSConfig(t *testing.T) { }) t.Run("test clientKeySecret is set, clientCertSecret is empty", func(t *testing.T) { - c := fakeTLSConfig(t) + c := fakeTLSConfig(t, false) c.CACertSecret = nil c.ClientCertSecret = nil _, err := GetTLSConfig(c) @@ -216,7 +223,7 @@ func TestGetTLSConfig(t *testing.T) { }) t.Run("test only caCertSecret is set", func(t *testing.T) { - c := fakeTLSConfig(t) + c := fakeTLSConfig(t, false) c.ClientCertSecret = nil c.ClientKeySecret = nil _, err := GetTLSConfig(c) @@ -225,7 +232,7 @@ func TestGetTLSConfig(t *testing.T) { }) t.Run("test clientCertSecret and clientKeySecret are set", func(t *testing.T) { - c := fakeTLSConfig(t) + c := fakeTLSConfig(t, false) c.CACertSecret = nil _, err := GetTLSConfig(c) assert.NotNil(t, err) @@ -233,9 +240,59 @@ func TestGetTLSConfig(t *testing.T) { }) t.Run("test all of 3 are set", func(t *testing.T) { - c := fakeTLSConfig(t) + c := fakeTLSConfig(t, false) _, err := GetTLSConfig(c) assert.NotNil(t, err) assert.True(t, strings.Contains(err.Error(), "failed to read ca cert file")) }) } + +func TestElementsMatch(t *testing.T) { + assert.True(t, ElementsMatch(nil, nil)) + assert.True(t, ElementsMatch([]string{"hello"}, []string{"hello"})) + assert.True(t, ElementsMatch([]string{"hello", "world"}, []string{"hello", "world"})) + assert.True(t, ElementsMatch([]string{}, []string{})) + + assert.False(t, ElementsMatch([]string{"hello"}, nil)) + assert.False(t, ElementsMatch([]string{"hello"}, []string{})) + assert.False(t, ElementsMatch([]string{}, []string{"hello"})) + assert.False(t, ElementsMatch([]string{"hello"}, []string{"hello", "world"})) + assert.False(t, ElementsMatch([]string{"hello", "world"}, []string{"hello"})) + assert.False(t, ElementsMatch([]string{"hello", "world"}, []string{"hello", "moon"})) + assert.True(t, ElementsMatch([]string{"hello", "world"}, []string{"world", "hello"})) + assert.True(t, ElementsMatch([]string{"hello", "world", "hello"}, []string{"hello", "hello", "world", "world"})) + assert.True(t, ElementsMatch([]string{"world", "hello"}, []string{"hello", "hello", "world", "world"})) + assert.True(t, ElementsMatch([]string{"hello", "hello", "world", "world"}, []string{"world", "hello"})) + assert.False(t, ElementsMatch([]string{"hello"}, []string{"*", "hello"})) + assert.False(t, ElementsMatch([]string{"hello", "*"}, []string{"hello"})) + assert.False(t, ElementsMatch([]string{"*", "hello", "*"}, []string{"hello"})) + assert.False(t, ElementsMatch([]string{"hello"}, []string{"world", "world"})) + assert.False(t, ElementsMatch([]string{"hello", "hello"}, []string{"world", "world"})) +} + +func TestSliceContains(t *testing.T) { + assert.True(t, SliceContains([]string{"hello", "*"}, "*")) + assert.True(t, SliceContains([]string{"*", "world"}, "*")) + assert.True(t, SliceContains([]string{"*", "world"}, "world")) + assert.True(t, SliceContains([]string{"*", "hello", "*"}, "*")) + assert.False(t, SliceContains([]string{"hello", "world"}, "*")) +} + +func TestCopyStringMap(t *testing.T) { + m1 := map[string]string{ + "a": "aaa", + "b": "bbb", + } + m2 := CopyStringMap(m1) + + m1["a"] = "zzz" + delete(m1, "b") + + require.Equal(t, m1, map[string]string{ + "a": "zzz", + }) + require.Equal(t, m2, map[string]string{ + "a": "aaa", + "b": "bbb", + }) +} diff --git a/pkg/client/eventsource/clientset/versioned/doc.go b/common/viper.go similarity index 62% rename from pkg/client/eventsource/clientset/versioned/doc.go rename 
to common/viper.go index d4d9e0efaf..134c790a33 100644 --- a/pkg/client/eventsource/clientset/versioned/doc.go +++ b/common/viper.go @@ -1,11 +1,11 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2024 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -14,7 +14,16 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by client-gen. DO NOT EDIT. +package common -// This package has the automatically generated clientset. -package versioned +import ( + "log/slog" + "os" + + "github.com/spf13/viper" +) + +func ViperWithLogging() *viper.Viper { + v := viper.NewWithOptions(viper.WithLogger(slog.New(slog.NewJSONHandler(os.Stdout, nil)))) + return v +} diff --git a/controllers/cmd/start.go b/controllers/cmd/start.go new file mode 100644 index 0000000000..323c8a2cab --- /dev/null +++ b/controllers/cmd/start.go @@ -0,0 +1,212 @@ +package cmd + +import ( + "fmt" + "os" + + "go.uber.org/zap" + appv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/healthz" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/source" + + argoevents "github.com/argoproj/argo-events" + "github.com/argoproj/argo-events/codefresh" + "github.com/argoproj/argo-events/common/logging" + "github.com/argoproj/argo-events/controllers" + "github.com/argoproj/argo-events/controllers/eventbus" + "github.com/argoproj/argo-events/controllers/eventsource" + "github.com/argoproj/argo-events/controllers/sensor" + eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" + eventsourcev1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" + sensorv1alpha1 "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" +) + +const ( + imageEnvVar = "ARGO_EVENTS_IMAGE" +) + +type ArgoEventsControllerOpts struct { + Namespaced bool + ManagedNamespace string + LeaderElection bool + MetricsPort int32 + HealthPort int32 +} + +func Start(eventsOpts ArgoEventsControllerOpts) { + logger := logging.NewArgoEventsLogger().Named(eventbus.ControllerName) + config, err := controllers.LoadConfig(func(err error) { + logger.Errorw("Failed to reload global configuration file", zap.Error(err)) + }) + if err != nil { + logger.Fatalw("Failed to load global configuration file", zap.Error(err)) + } + + if err = controllers.ValidateConfig(config); err != nil { + logger.Fatalw("Global configuration file validation failed", zap.Error(err)) + } + + imageName, defined := os.LookupEnv(imageEnvVar) + if !defined { + logger.Fatalf("required environment variable '%s' not defined", imageEnvVar) + } + opts := ctrl.Options{ + Metrics: metricsserver.Options{ + BindAddress: fmt.Sprintf(":%d", eventsOpts.MetricsPort), + }, + HealthProbeBindAddress: fmt.Sprintf(":%d", eventsOpts.HealthPort), + } + if 
eventsOpts.Namespaced {
+		opts.Cache = cache.Options{
+			DefaultNamespaces: map[string]cache.Config{
+				eventsOpts.ManagedNamespace: {},
+			},
+		}
+	}
+	if eventsOpts.LeaderElection {
+		opts.LeaderElection = true
+		opts.LeaderElectionID = "argo-events-controller"
+	}
+	restConfig := ctrl.GetConfigOrDie()
+	mgr, err := ctrl.NewManager(restConfig, opts)
+	if err != nil {
+		logger.Fatalw("Unable to get a controller-runtime manager", zap.Error(err))
+	}
+	kubeClient := kubernetes.NewForConfigOrDie(restConfig)
+
+	// Readiness probe
+	if err := mgr.AddReadyzCheck("readiness", healthz.Ping); err != nil {
+		logger.Fatalw("Unable to add a readiness check", zap.Error(err))
+	}
+
+	// Liveness probe
+	if err := mgr.AddHealthzCheck("liveness", healthz.Ping); err != nil {
+		logger.Fatalw("Unable to add a health check", zap.Error(err))
+	}
+
+	if err := eventbusv1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
+		logger.Fatalw("Unable to add scheme", zap.Error(err))
+	}
+
+	if err := eventsourcev1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
+		logger.Fatalw("Unable to add EventSource scheme", zap.Error(err))
+	}
+
+	if err := sensorv1alpha1.AddToScheme(mgr.GetScheme()); err != nil {
+		logger.Fatalw("Unable to add Sensor scheme", zap.Error(err))
+	}
+
+	ctx := logging.WithLogger(signals.SetupSignalHandler(), logger)
+
+	cfClient, err := codefresh.NewClient(ctx, eventsOpts.ManagedNamespace)
+	if err != nil {
+		logger.Fatalw("unable to initialise Codefresh Client", zap.Error(err))
+	}
+
+	// EventBus controller
+	eventBusController, err := controller.New(eventbus.ControllerName, mgr, controller.Options{
+		Reconciler: eventbus.NewReconciler(mgr.GetClient(), kubeClient, mgr.GetScheme(), config, logger, cfClient),
+	})
+	if err != nil {
+		logger.Fatalw("Unable to set up EventBus controller", zap.Error(err))
+	}
+
+	// Watch EventBus and enqueue EventBus object key
+	if err := eventBusController.Watch(source.Kind(mgr.GetCache(), &eventbusv1alpha1.EventBus{}), &handler.EnqueueRequestForObject{},
+		predicate.Or(
+			predicate.GenerationChangedPredicate{},
+			predicate.LabelChangedPredicate{},
+		)); err != nil {
+		logger.Fatalw("Unable to watch EventBus", zap.Error(err))
+	}
+
+	// Watch ConfigMaps and enqueue owning EventBus key
+	if err := eventBusController.Watch(source.Kind(mgr.GetCache(), &corev1.ConfigMap{}),
+		handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &eventbusv1alpha1.EventBus{}, handler.OnlyControllerOwner()),
+		predicate.GenerationChangedPredicate{}); err != nil {
+		logger.Fatalw("Unable to watch ConfigMaps", zap.Error(err))
+	}
+
+	// Watch StatefulSets and enqueue owning EventBus key
+	if err := eventBusController.Watch(source.Kind(mgr.GetCache(), &appv1.StatefulSet{}),
+		handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &eventbusv1alpha1.EventBus{}, handler.OnlyControllerOwner()),
+		predicate.GenerationChangedPredicate{}); err != nil {
+		logger.Fatalw("Unable to watch StatefulSets", zap.Error(err))
+	}
+
+	// Watch Services and enqueue owning EventBus key
+	if err := eventBusController.Watch(source.Kind(mgr.GetCache(), &corev1.Service{}),
+		handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &eventbusv1alpha1.EventBus{}, handler.OnlyControllerOwner()),
+		predicate.GenerationChangedPredicate{}); err != nil {
+		logger.Fatalw("Unable to watch Services", zap.Error(err))
+	}
+
+	// EventSource controller
+	eventSourceController, err := controller.New(eventsource.ControllerName, mgr, controller.Options{
+		Reconciler:
eventsource.NewReconciler(mgr.GetClient(), mgr.GetScheme(), imageName, logger, cfClient), + }) + if err != nil { + logger.Fatalw("Unable to set up EventSource controller", zap.Error(err)) + } + + // Watch EventSource and enqueue EventSource object key + if err := eventSourceController.Watch(source.Kind(mgr.GetCache(), &eventsourcev1alpha1.EventSource{}), &handler.EnqueueRequestForObject{}, + predicate.Or( + predicate.GenerationChangedPredicate{}, + predicate.LabelChangedPredicate{}, + )); err != nil { + logger.Fatalw("Unable to watch EventSources", zap.Error(err)) + } + + // Watch Deployments and enqueue owning EventSource key + if err := eventSourceController.Watch(source.Kind(mgr.GetCache(), &appv1.Deployment{}), + handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &eventsourcev1alpha1.EventSource{}, handler.OnlyControllerOwner()), + predicate.GenerationChangedPredicate{}); err != nil { + logger.Fatalw("Unable to watch Deployments", zap.Error(err)) + } + + // Watch Services and enqueue owning EventSource key + if err := eventSourceController.Watch(source.Kind(mgr.GetCache(), &corev1.Service{}), + handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &eventsourcev1alpha1.EventSource{}, handler.OnlyControllerOwner()), + predicate.GenerationChangedPredicate{}); err != nil { + logger.Fatalw("Unable to watch Services", zap.Error(err)) + } + + // Sensor controller + sensorController, err := controller.New(sensor.ControllerName, mgr, controller.Options{ + Reconciler: sensor.NewReconciler(mgr.GetClient(), mgr.GetScheme(), imageName, logger, cfClient), + }) + if err != nil { + logger.Fatalw("Unable to set up Sensor controller", zap.Error(err)) + } + + // Watch Sensor and enqueue Sensor object key + if err := sensorController.Watch(source.Kind(mgr.GetCache(), &sensorv1alpha1.Sensor{}), &handler.EnqueueRequestForObject{}, + predicate.Or( + predicate.GenerationChangedPredicate{}, + predicate.LabelChangedPredicate{}, + )); err != nil { + logger.Fatalw("Unable to watch Sensors", zap.Error(err)) + } + + // Watch Deployments and enqueue owning Sensor key + if err := sensorController.Watch(source.Kind(mgr.GetCache(), &appv1.Deployment{}), + handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &sensorv1alpha1.Sensor{}, handler.OnlyControllerOwner()), + predicate.GenerationChangedPredicate{}); err != nil { + logger.Fatalw("Unable to watch Deployments", zap.Error(err)) + } + + logger.Infow("Starting controller manager", "version", argoevents.GetVersion()) + if err := mgr.Start(ctx); err != nil { + logger.Fatalw("Unable to start controller manager", zap.Error(err)) + } +} diff --git a/controllers/config.go b/controllers/config.go new file mode 100644 index 0000000000..f335468a1a --- /dev/null +++ b/controllers/config.go @@ -0,0 +1,130 @@ +package controllers + +import ( + "fmt" + "strings" + + "github.com/argoproj/argo-events/common" + "github.com/fsnotify/fsnotify" +) + +type GlobalConfig struct { + EventBus *EventBusConfig `json:"eventBus"` +} + +type EventBusConfig struct { + NATS *StanConfig `json:"nats"` + JetStream *JetStreamConfig `json:"jetstream"` +} + +type StanConfig struct { + Versions []StanVersion `json:"versions"` +} + +type StanVersion struct { + Version string `json:"version"` + NATSStreamingImage string `json:"natsStreamingImage"` + MetricsExporterImage string `json:"metricsExporterImage"` +} + +type JetStreamConfig struct { + Settings string `json:"settings"` + StreamConfig string `json:"streamConfig"` + Versions []JetStreamVersion 
`json:"versions"` +} + +type JetStreamVersion struct { + Version string `json:"version"` + NatsImage string `json:"natsImage"` + ConfigReloaderImage string `json:"configReloaderImage"` + MetricsExporterImage string `json:"metricsExporterImage"` + StartCommand string `json:"startCommand"` +} + +func (g *GlobalConfig) supportedSTANVersions() []string { + result := []string{} + if g.EventBus == nil || g.EventBus.NATS == nil { + return result + } + for _, v := range g.EventBus.NATS.Versions { + result = append(result, v.Version) + } + return result +} + +func (g *GlobalConfig) supportedJetStreamVersions() []string { + result := []string{} + if g.EventBus == nil || g.EventBus.JetStream == nil { + return result + } + for _, v := range g.EventBus.JetStream.Versions { + result = append(result, v.Version) + } + return result +} + +func (g *GlobalConfig) GetSTANVersion(version string) (*StanVersion, error) { + if g.EventBus == nil || g.EventBus.NATS == nil { + return nil, fmt.Errorf("\"eventBus.nats\" not found in the configuration") + } + if len(g.EventBus.NATS.Versions) == 0 { + return nil, fmt.Errorf("nats streaming version configuration not found") + } + for _, r := range g.EventBus.NATS.Versions { + if r.Version == version { + return &r, nil + } + } + return nil, fmt.Errorf("unsupported version %q, supported versions: %q", version, strings.Join(g.supportedSTANVersions(), ",")) +} + +func (g *GlobalConfig) GetJetStreamVersion(version string) (*JetStreamVersion, error) { + if g.EventBus == nil || g.EventBus.JetStream == nil { + return nil, fmt.Errorf("\"eventBus.jetstream\" not found in the configuration") + } + if len(g.EventBus.JetStream.Versions) == 0 { + return nil, fmt.Errorf("jetstream version configuration not found") + } + for _, r := range g.EventBus.JetStream.Versions { + if r.Version == version { + return &r, nil + } + } + return nil, fmt.Errorf("unsupported version %q, supported versions: %q", version, strings.Join(g.supportedJetStreamVersions(), ",")) +} + +func LoadConfig(onErrorReloading func(error)) (*GlobalConfig, error) { + v := common.ViperWithLogging() + v.SetConfigName("controller-config") + v.SetConfigType("yaml") + v.AddConfigPath("/etc/argo-events") + err := v.ReadInConfig() + if err != nil { + return nil, fmt.Errorf("failed to load configuration file, %w", err) + } + r := &GlobalConfig{} + err = v.Unmarshal(r) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal configuration file, 
%w", err) + } + v.WatchConfig() + v.OnConfigChange(func(e fsnotify.Event) { + err = v.Unmarshal(r) + if err != nil { + onErrorReloading(err) + } + }) + return r, nil +} + +func ValidateConfig(config *GlobalConfig) error { + if len(config.supportedJetStreamVersions()) == 0 { + return fmt.Errorf("no jetstream versions were provided in the controller config") + } + + if len(config.supportedSTANVersions()) == 0 { + return fmt.Errorf("no stan versions were provided in the controller config") + } + + return nil +} diff --git a/controllers/eventbus/cmd/start.go b/controllers/eventbus/cmd/start.go deleted file mode 100644 index a471ae6a04..0000000000 --- a/controllers/eventbus/cmd/start.go +++ /dev/null @@ -1,129 +0,0 @@ -package cmd - -import ( - "fmt" - "os" - "reflect" - - "go.uber.org/zap" - appv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/healthz" - "sigs.k8s.io/controller-runtime/pkg/manager/signals" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/source" - - argoevents "github.com/argoproj/argo-events" - "github.com/argoproj/argo-events/common" - "github.com/argoproj/argo-events/common/logging" - "github.com/argoproj/argo-events/controllers/eventbus" - eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" - eventsourcev1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" - sensorv1alpha1 "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" -) - -const ( - natsStreamingEnvVar = "NATS_STREAMING_IMAGE" - natsMetricsExporterEnvVar = "NATS_METRICS_EXPORTER_IMAGE" -) - -func Start(namespaced bool, managedNamespace string) { - logger := logging.NewArgoEventsLogger().Named(eventbus.ControllerName) - natsStreamingImage, defined := os.LookupEnv(natsStreamingEnvVar) - if !defined { - logger.Fatalf("required environment variable '%s' not defined", natsStreamingEnvVar) - } - natsMetricsImage, defined := os.LookupEnv(natsMetricsExporterEnvVar) - if !defined { - logger.Fatalf("required environment variable '%s' not defined", natsMetricsExporterEnvVar) - } - opts := ctrl.Options{ - MetricsBindAddress: fmt.Sprintf(":%d", common.ControllerMetricsPort), - HealthProbeBindAddress: ":8081", - } - if namespaced { - opts.Namespace = managedNamespace - } - mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), opts) - if err != nil { - logger.Fatalw("unable to get a controller-runtime manager", zap.Error(err)) - } - - // Readyness probe - if err := mgr.AddReadyzCheck("readiness", healthz.Ping); err != nil { - logger.Fatalw("unable add a readiness check", zap.Error(err)) - } - - // Liveness probe - if err := mgr.AddHealthzCheck("liveness", healthz.Ping); err != nil { - logger.Fatalw("unable add a health check", zap.Error(err)) - } - - if err := eventbusv1alpha1.AddToScheme(mgr.GetScheme()); err != nil { - logger.Fatalw("unable to add scheme", zap.Error(err)) - } - - if err := eventsourcev1alpha1.AddToScheme(mgr.GetScheme()); err != nil { - logger.Fatalw("unable to add EventSource scheme", zap.Error(err)) - } - - if err := sensorv1alpha1.AddToScheme(mgr.GetScheme()); err != nil { - logger.Fatalw("unable to add Sensor scheme", zap.Error(err)) - } - - // A controller with DefaultControllerRateLimiter - c, err := controller.New(eventbus.ControllerName, mgr, controller.Options{ - Reconciler: 
eventbus.NewReconciler(mgr.GetClient(), mgr.GetScheme(), natsStreamingImage, natsMetricsImage, logger), - }) - if err != nil { - logger.Fatalw("unable to set up individual controller", zap.Error(err)) - } - - // Watch EventBus and enqueue EventBus object key - if err := c.Watch(&source.Kind{Type: &eventbusv1alpha1.EventBus{}}, &handler.EnqueueRequestForObject{}, - predicate.Or( - predicate.GenerationChangedPredicate{}, - // TODO: change to use LabelChangedPredicate with controller-runtime v0.8 - predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { - if e.ObjectOld == nil { - return false - } - if e.ObjectNew == nil { - return false - } - return !reflect.DeepEqual(e.ObjectNew.GetLabels(), e.ObjectOld.GetLabels()) - }}, - )); err != nil { - logger.Fatalw("unable to watch EventBus", zap.Error(err)) - } - - // Watch ConfigMaps and enqueue owning EventBus key - if err := c.Watch(&source.Kind{Type: &corev1.ConfigMap{}}, &handler.EnqueueRequestForOwner{OwnerType: &eventbusv1alpha1.EventBus{}, IsController: true}, predicate.GenerationChangedPredicate{}); err != nil { - logger.Fatalw("unable to watch ConfigMaps", zap.Error(err)) - } - - // Watch Secrets and enqueue owning EventBus key - if err := c.Watch(&source.Kind{Type: &corev1.Secret{}}, &handler.EnqueueRequestForOwner{OwnerType: &eventbusv1alpha1.EventBus{}, IsController: true}, predicate.GenerationChangedPredicate{}); err != nil { - logger.Fatalw("unable to watch Secrets", zap.Error(err)) - } - - // Watch StatefulSets and enqueue owning EventBus key - if err := c.Watch(&source.Kind{Type: &appv1.StatefulSet{}}, &handler.EnqueueRequestForOwner{OwnerType: &eventbusv1alpha1.EventBus{}, IsController: true}, predicate.GenerationChangedPredicate{}); err != nil { - logger.Fatalw("unable to watch StatefulSets", zap.Error(err)) - } - - // Watch Services and enqueue owning EventBus key - if err := c.Watch(&source.Kind{Type: &corev1.Service{}}, &handler.EnqueueRequestForOwner{OwnerType: &eventbusv1alpha1.EventBus{}, IsController: true}, predicate.GenerationChangedPredicate{}); err != nil { - logger.Fatalw("unable to watch Services", zap.Error(err)) - } - - logger.Infow("starting eventbus controller", "version", argoevents.GetVersion()) - if err := mgr.Start(signals.SetupSignalHandler()); err != nil { - logger.Fatalw("unable to run eventbus controller", zap.Error(err)) - } -} diff --git a/controllers/eventbus/controller.go b/controllers/eventbus/controller.go index cdde1af81a..10528bdb30 100644 --- a/controllers/eventbus/controller.go +++ b/controllers/eventbus/controller.go @@ -7,13 +7,18 @@ import ( "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "github.com/argoproj/argo-events/codefresh" + "github.com/argoproj/argo-events/common/logging" + "github.com/argoproj/argo-events/controllers" "github.com/argoproj/argo-events/controllers/eventbus/installer" "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" + "github.com/pkg/errors" ) const ( @@ -24,17 +29,19 @@ const ( ) type reconciler struct { - client client.Client - scheme *runtime.Scheme + client client.Client + kubeClient kubernetes.Interface + scheme *runtime.Scheme - natsStreamingImage string - natsMetricsImage string - logger *zap.SugaredLogger + config *controllers.GlobalConfig + logger 
*zap.SugaredLogger + + cfClient *codefresh.Client } // NewReconciler returns a new reconciler -func NewReconciler(client client.Client, scheme *runtime.Scheme, natsStreamingImage, natsMetricsImage string, logger *zap.SugaredLogger) reconcile.Reconciler { - return &reconciler{client: client, scheme: scheme, natsStreamingImage: natsStreamingImage, natsMetricsImage: natsMetricsImage, logger: logger} +func NewReconciler(client client.Client, kubeClient kubernetes.Interface, scheme *runtime.Scheme, config *controllers.GlobalConfig, logger *zap.SugaredLogger, cfClient *codefresh.Client) reconcile.Reconciler { + return &reconciler{client: client, scheme: scheme, config: config, kubeClient: kubeClient, logger: logger, cfClient: cfClient} } func (r *reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { @@ -48,13 +55,19 @@ func (r *reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu return ctrl.Result{}, err } log := r.logger.With("namespace", eventBus.Namespace).With("eventbus", eventBus.Name) + ctx = logging.WithLogger(ctx, log) busCopy := eventBus.DeepCopy() reconcileErr := r.reconcile(ctx, busCopy) if reconcileErr != nil { log.Errorw("reconcile error", zap.Error(reconcileErr)) + r.cfClient.ReportError(errors.Wrap(reconcileErr, "reconcile error"), codefresh.ErrorContext{ + ObjectMeta: eventBus.ObjectMeta, + TypeMeta: eventBus.TypeMeta, + }) } if r.needsUpdate(eventBus, busCopy) { - if err := r.client.Update(ctx, busCopy); err != nil { + // Use a DeepCopy to update, because it will be mutated afterwards, with empty Status. + if err := r.client.Update(ctx, busCopy.DeepCopy()); err != nil { return reconcile.Result{}, err } } @@ -66,12 +79,12 @@ func (r *reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu // reconcile does the real logic func (r *reconciler) reconcile(ctx context.Context, eventBus *v1alpha1.EventBus) error { - log := r.logger.With("namespace", eventBus.Namespace).With("eventbus", eventBus.Name) + log := logging.FromContext(ctx) if !eventBus.DeletionTimestamp.IsZero() { log.Info("deleting eventbus") if controllerutil.ContainsFinalizer(eventBus, finalizerName) { // Finalizer logic should be added here. 
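+ // Clean up resources (e.g. PVCs) that are not garbage-collected via owner references before removing the finalizer.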
- if err := installer.Uninstall(ctx, eventBus, r.client, r.natsStreamingImage, r.natsMetricsImage, log); err != nil { + if err := installer.Uninstall(ctx, eventBus, r.client, r.kubeClient, r.config, log); err != nil { log.Errorw("failed to uninstall", zap.Error(err)) return err } @@ -84,10 +97,12 @@ func (r *reconciler) reconcile(ctx context.Context, eventBus *v1alpha1.EventBus) eventBus.Status.InitConditions() if err := ValidateEventBus(eventBus); err != nil { log.Errorw("validation failed", zap.Error(err)) - eventBus.Status.MarkDeployFailed("InvalidSpec", err.Error()) + eventBus.Status.MarkNotConfigured("InvalidSpec", err.Error()) return err + } else { + eventBus.Status.MarkConfigured() } - return installer.Install(ctx, eventBus, r.client, r.natsStreamingImage, r.natsMetricsImage, log) + return installer.Install(ctx, eventBus, r.client, r.kubeClient, r.config, log) } func (r *reconciler) needsUpdate(old, new *v1alpha1.EventBus) bool { diff --git a/controllers/eventbus/controller_test.go b/controllers/eventbus/controller_test.go index e8d510f812..02d260332c 100644 --- a/controllers/eventbus/controller_test.go +++ b/controllers/eventbus/controller_test.go @@ -5,23 +5,24 @@ import ( "testing" "github.com/stretchr/testify/assert" + "go.uber.org/zap/zaptest" appv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apiresource "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8sfake "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "github.com/argoproj/argo-events/common/logging" + "github.com/argoproj/argo-events/controllers" "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" ) const ( - testBusName = "test-bus" - testStreamingImage = "test-steaming-image" - testNamespace = "testNamespace" - testURL = "http://test" + testBusName = "test-bus" + testNamespace = "testNamespace" + testURL = "http://test" ) var ( @@ -69,6 +70,30 @@ var ( }, }, } + + fakeConfig = &controllers.GlobalConfig{ + EventBus: &controllers.EventBusConfig{ + NATS: &controllers.StanConfig{ + Versions: []controllers.StanVersion{ + { + Version: "0.22.1", + NATSStreamingImage: "test-n-s-image", + MetricsExporterImage: "test-n-s-m-image", + }, + }, + }, + JetStream: &controllers.JetStreamConfig{ + Versions: []controllers.JetStreamVersion{ + { + Version: "testVersion", + NatsImage: "testJSImage", + ConfigReloaderImage: "test-nats-rl-image", + MetricsExporterImage: "testJSMetricsImage", + }, + }, + }, + }, + } ) func init() { @@ -83,10 +108,11 @@ func TestReconcileNative(t *testing.T) { ctx := context.TODO() cl := fake.NewClientBuilder().Build() r := &reconciler{ - client: cl, - scheme: scheme.Scheme, - natsStreamingImage: testStreamingImage, - logger: logging.NewArgoEventsLogger(), + client: cl, + kubeClient: k8sfake.NewSimpleClientset(), + scheme: scheme.Scheme, + config: fakeConfig, + logger: zaptest.NewLogger(t).Sugar(), } err := r.reconcile(ctx, testBus) assert.NoError(t, err) @@ -103,10 +129,11 @@ func TestReconcileExotic(t *testing.T) { ctx := context.TODO() cl := fake.NewClientBuilder().Build() r := &reconciler{ - client: cl, - scheme: scheme.Scheme, - natsStreamingImage: testStreamingImage, - logger: logging.NewArgoEventsLogger(), + client: cl, + kubeClient: k8sfake.NewSimpleClientset(), + scheme: scheme.Scheme, + config: fakeConfig, + logger: zaptest.NewLogger(t).Sugar(), } err := r.reconcile(ctx, testBus) assert.NoError(t, err) @@ -120,10 
+147,11 @@ func TestNeedsUpdate(t *testing.T) { testBus := nativeBus.DeepCopy() cl := fake.NewClientBuilder().Build() r := &reconciler{ - client: cl, - scheme: scheme.Scheme, - natsStreamingImage: testStreamingImage, - logger: logging.NewArgoEventsLogger(), + client: cl, + kubeClient: k8sfake.NewSimpleClientset(), + scheme: scheme.Scheme, + config: fakeConfig, + logger: zaptest.NewLogger(t).Sugar(), } assert.False(t, r.needsUpdate(nativeBus, testBus)) controllerutil.AddFinalizer(testBus, finalizerName) diff --git a/controllers/eventbus/fuzz_test.go b/controllers/eventbus/fuzz_test.go new file mode 100644 index 0000000000..c7acf25c3c --- /dev/null +++ b/controllers/eventbus/fuzz_test.go @@ -0,0 +1,53 @@ +package eventbus + +import ( + "context" + "sync" + "testing" + + fuzz "github.com/AdaLogics/go-fuzz-headers" + appv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/argoproj/argo-events/controllers" + + "github.com/argoproj/argo-events/common/logging" + "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" +) + +var initter sync.Once + +func initScheme() { + _ = v1alpha1.AddToScheme(scheme.Scheme) + _ = appv1.AddToScheme(scheme.Scheme) + _ = corev1.AddToScheme(scheme.Scheme) +} + +func FuzzEventbusReconciler(f *testing.F) { + f.Fuzz(func(t *testing.T, data []byte) { + initter.Do(initScheme) + f := fuzz.NewConsumer(data) + nativeBus := &v1alpha1.EventBus{} + err := f.GenerateStruct(nativeBus) + if err != nil { + return + } + cl := fake.NewClientBuilder().Build() + config := &controllers.GlobalConfig{} + err = f.GenerateStruct(config) + if err != nil { + return + } + r := &reconciler{ + client: cl, + scheme: scheme.Scheme, + config: config, + logger: logging.NewArgoEventsLogger(), + } + ctx := context.Background() + _ = r.reconcile(ctx, nativeBus) + _ = r.needsUpdate(nativeBus, nativeBus) + }) +} diff --git a/controllers/eventbus/installer/assets/jetstream/nats-cluster.conf b/controllers/eventbus/installer/assets/jetstream/nats-cluster.conf new file mode 100644 index 0000000000..21ac516c45 --- /dev/null +++ b/controllers/eventbus/installer/assets/jetstream/nats-cluster.conf @@ -0,0 +1,47 @@ +max_payload: {{.MaxPayloadSize}} +port: {{.ClientPort}} +pid_file: "/var/run/nats/nats.pid" +############### +# # +# Monitoring # +# # +############### +http: {{.MonitorPort}} +server_name: $POD_NAME +################################### +# # +# NATS JetStream # +# # +################################### +jetstream { + key: $JS_KEY + store_dir: "/data/jetstream/store" + {{.Settings}} +} + +################################### +# # +# NATS Cluster # +# # +################################### +cluster { + port: {{.ClusterPort}} + name: {{.ClusterName}} + routes: [{{.Routes}}] + cluster_advertise: $CLUSTER_ADVERTISE + connect_retries: 120 + + tls { + cert_file: "/etc/nats-config/cluster-server-cert.pem" + key_file: "/etc/nats-config/cluster-server-key.pem" + ca_file: "/etc/nats-config/cluster-ca-cert.pem" + } +} + +lame_duck_duration: 120s +################## +# # +# Authorization # +# # +################## +include ./auth.conf \ No newline at end of file diff --git a/controllers/eventbus/installer/assets/jetstream/nats.conf b/controllers/eventbus/installer/assets/jetstream/nats.conf new file mode 100644 index 0000000000..2b980d21c8 --- /dev/null +++ b/controllers/eventbus/installer/assets/jetstream/nats.conf @@ -0,0 +1,27 @@ +max_payload: {{.MaxPayloadSize}} +port: {{.ClientPort}} +pid_file: 
"/var/run/nats/nats.pid" +############### +# # +# Monitoring # +# # +############### +http: {{.MonitorPort}} +server_name: $POD_NAME +################################### +# # +# NATS JetStream # +# # +################################### +jetstream { + key: $JS_KEY + store_dir: "/data/jetstream/store" + {{.Settings}} +} +lame_duck_duration: 120s +################## +# # +# Authorization # +# # +################## +include ./auth.conf \ No newline at end of file diff --git a/controllers/eventbus/installer/assets/jetstream/server-auth.conf b/controllers/eventbus/installer/assets/jetstream/server-auth.conf new file mode 100644 index 0000000000..4257cd2382 --- /dev/null +++ b/controllers/eventbus/installer/assets/jetstream/server-auth.conf @@ -0,0 +1,21 @@ +system_account: sys + +accounts: { + "js": { + "jetstream": true, + "users": [ + {"user": "{{.JetStreamUser}}", "pass": "{{.JetStreamPassword}}"} + ] + }, + "sys": { + "users": [ + {"user": "sys", "pass": "{{.SysPassword}}"} + ] + } +} + +tls { + cert_file: "/etc/nats-config/server-cert.pem" + key_file: "/etc/nats-config/server-key.pem" + ca_file: "/etc/nats-config/ca-cert.pem" +} \ No newline at end of file diff --git a/controllers/eventbus/installer/exotic_jetstream.go b/controllers/eventbus/installer/exotic_jetstream.go new file mode 100644 index 0000000000..2891aba68e --- /dev/null +++ b/controllers/eventbus/installer/exotic_jetstream.go @@ -0,0 +1,43 @@ +package installer + +import ( + "context" + "fmt" + + "go.uber.org/zap" + + "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" +) + +// exoticJetStreamInstaller is an inalleration implementation of exotic jetstream config. +type exoticJetStreamInstaller struct { + eventBus *v1alpha1.EventBus + + logger *zap.SugaredLogger +} + +// NewExoticJetStreamInstaller return a new exoticJetStreamInstaller +func NewExoticJetStreamInstaller(eventBus *v1alpha1.EventBus, logger *zap.SugaredLogger) Installer { + return &exoticJetStreamInstaller{ + eventBus: eventBus, + logger: logger.Named("exotic-jetstream"), + } +} + +func (i *exoticJetStreamInstaller) Install(ctx context.Context) (*v1alpha1.BusConfig, error) { + JetStreamObj := i.eventBus.Spec.JetStreamExotic + if JetStreamObj == nil { + return nil, fmt.Errorf("invalid request") + } + i.eventBus.Status.MarkDeployed("Skipped", "Skip deployment because of using exotic config.") + i.logger.Info("use exotic config") + busConfig := &v1alpha1.BusConfig{ + JetStream: JetStreamObj, + } + return busConfig, nil +} + +func (i *exoticJetStreamInstaller) Uninstall(ctx context.Context) error { + i.logger.Info("nothing to uninstall") + return nil +} diff --git a/controllers/eventbus/installer/exotic_jetstream_test.go b/controllers/eventbus/installer/exotic_jetstream_test.go new file mode 100644 index 0000000000..6b1dba87cc --- /dev/null +++ b/controllers/eventbus/installer/exotic_jetstream_test.go @@ -0,0 +1,50 @@ +package installer + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/argoproj/argo-events/common/logging" + "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" +) + +var ( + testJSExoticURL = "nats://nats:4222" + + testJSExoticBus = &v1alpha1.EventBus{ + TypeMeta: metav1.TypeMeta{ + APIVersion: v1alpha1.SchemeGroupVersion.String(), + Kind: "EventBus", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: testNamespace, + Name: testExoticName, + }, + Spec: v1alpha1.EventBusSpec{ + JetStreamExotic: &v1alpha1.JetStreamConfig{ + URL: testJSExoticURL, + 
}, + }, + } +) + +func TestInstallationJSExotic(t *testing.T) { + t.Run("installation with exotic jetstream config", func(t *testing.T) { + installer := NewExoticJetStreamInstaller(testJSExoticBus, logging.NewArgoEventsLogger()) + conf, err := installer.Install(context.TODO()) + assert.NoError(t, err) + assert.NotNil(t, conf.JetStream) + assert.Equal(t, conf.JetStream.URL, testJSExoticURL) + }) +} + +func TestUninstallationJSExotic(t *testing.T) { + t.Run("uninstallation with exotic jetstream config", func(t *testing.T) { + installer := NewExoticJetStreamInstaller(testJSExoticBus, logging.NewArgoEventsLogger()) + err := installer.Uninstall(context.TODO()) + assert.NoError(t, err) + }) +} diff --git a/controllers/eventbus/installer/exotic_nats.go b/controllers/eventbus/installer/exotic_nats.go index a84ad386f5..ac0b3b2bd7 100644 --- a/controllers/eventbus/installer/exotic_nats.go +++ b/controllers/eventbus/installer/exotic_nats.go @@ -2,7 +2,7 @@ package installer import ( "context" - "errors" + "fmt" "go.uber.org/zap" @@ -27,10 +27,9 @@ func NewExoticNATSInstaller(eventBus *v1alpha1.EventBus, logger *zap.SugaredLogg func (i *exoticNATSInstaller) Install(ctx context.Context) (*v1alpha1.BusConfig, error) { natsObj := i.eventBus.Spec.NATS if natsObj == nil || natsObj.Exotic == nil { - return nil, errors.New("invalid request") + return nil, fmt.Errorf("invalid request") } i.eventBus.Status.MarkDeployed("Skipped", "Skip deployment because of using exotic config.") - i.eventBus.Status.MarkConfigured() i.logger.Info("use exotic config") busConfig := &v1alpha1.BusConfig{ NATS: natsObj.Exotic, diff --git a/controllers/eventbus/installer/exotic_nats_test.go b/controllers/eventbus/installer/exotic_nats_test.go index 15eaba10a8..8d2cccdb98 100644 --- a/controllers/eventbus/installer/exotic_nats_test.go +++ b/controllers/eventbus/installer/exotic_nats_test.go @@ -17,7 +17,7 @@ const ( ) var ( - testExoticBus = &v1alpha1.EventBus{ + testNatsExoticBus = &v1alpha1.EventBus{ TypeMeta: metav1.TypeMeta{ APIVersion: v1alpha1.SchemeGroupVersion.String(), Kind: "EventBus", @@ -38,7 +38,7 @@ var ( func TestInstallationExotic(t *testing.T) { t.Run("installation with exotic nats config", func(t *testing.T) { - installer := NewExoticNATSInstaller(testExoticBus, logging.NewArgoEventsLogger()) + installer := NewExoticNATSInstaller(testNatsExoticBus, logging.NewArgoEventsLogger()) conf, err := installer.Install(context.TODO()) assert.NoError(t, err) assert.NotNil(t, conf.NATS) @@ -48,7 +48,7 @@ func TestInstallationExotic(t *testing.T) { func TestUninstallationExotic(t *testing.T) { t.Run("uninstallation with exotic nats config", func(t *testing.T) { - installer := NewExoticNATSInstaller(testExoticBus, logging.NewArgoEventsLogger()) + installer := NewExoticNATSInstaller(testNatsExoticBus, logging.NewArgoEventsLogger()) err := installer.Uninstall(context.TODO()) assert.NoError(t, err) }) diff --git a/controllers/eventbus/installer/installer.go b/controllers/eventbus/installer/installer.go index 0f932c71eb..194caaa61b 100644 --- a/controllers/eventbus/installer/installer.go +++ b/controllers/eventbus/installer/installer.go @@ -2,12 +2,14 @@ package installer import ( "context" + "fmt" - "github.com/pkg/errors" "go.uber.org/zap" + "k8s.io/client-go/kubernetes" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/argoproj/argo-events/common" + "github.com/argoproj/argo-events/controllers" "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" eventsourcev1alpha1 
"github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" sensorv1alpha1 "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" @@ -22,8 +24,8 @@ type Installer interface { } // Install function installs the event bus -func Install(ctx context.Context, eventBus *v1alpha1.EventBus, client client.Client, natsStreamingImage, natsMetricsImage string, logger *zap.SugaredLogger) error { - installer, err := getInstaller(eventBus, client, natsStreamingImage, natsMetricsImage, logger) +func Install(ctx context.Context, eventBus *v1alpha1.EventBus, client client.Client, kubeClient kubernetes.Interface, config *controllers.GlobalConfig, logger *zap.SugaredLogger) error { + installer, err := getInstaller(eventBus, client, kubeClient, config, logger) if err != nil { logger.Errorw("failed to an installer", zap.Error(err)) return err @@ -38,15 +40,21 @@ func Install(ctx context.Context, eventBus *v1alpha1.EventBus, client client.Cli } // GetInstaller returns Installer implementation -func getInstaller(eventBus *v1alpha1.EventBus, client client.Client, natsStreamingImage, natsMetricsImage string, logger *zap.SugaredLogger) (Installer, error) { +func getInstaller(eventBus *v1alpha1.EventBus, client client.Client, kubeClient kubernetes.Interface, config *controllers.GlobalConfig, logger *zap.SugaredLogger) (Installer, error) { if nats := eventBus.Spec.NATS; nats != nil { if nats.Exotic != nil { return NewExoticNATSInstaller(eventBus, logger), nil } else if nats.Native != nil { - return NewNATSInstaller(client, eventBus, natsStreamingImage, natsMetricsImage, getLabels(eventBus), logger), nil + return NewNATSInstaller(client, eventBus, config, getLabels(eventBus), kubeClient, logger), nil } + } else if js := eventBus.Spec.JetStream; js != nil { + return NewJetStreamInstaller(client, eventBus, config, getLabels(eventBus), kubeClient, logger), nil + } else if kafka := eventBus.Spec.Kafka; kafka != nil { + return NewExoticKafkaInstaller(eventBus, logger), nil + } else if js := eventBus.Spec.JetStreamExotic; js != nil { + return NewExoticJetStreamInstaller(eventBus, logger), nil } - return nil, errors.New("invalid eventbus spec") + return nil, fmt.Errorf("invalid eventbus spec") } func getLabels(bus *v1alpha1.EventBus) map[string]string { @@ -65,26 +73,26 @@ func getLabels(bus *v1alpha1.EventBus) map[string]string { // separately. // // It could also be used to check if the EventBus object can be safely deleted. 
-func Uninstall(ctx context.Context, eventBus *v1alpha1.EventBus, client client.Client, natsStreamingImage, natsMetricsImage string, logger *zap.SugaredLogger) error { +func Uninstall(ctx context.Context, eventBus *v1alpha1.EventBus, client client.Client, kubeClient kubernetes.Interface, config *controllers.GlobalConfig, logger *zap.SugaredLogger) error { linkedEventSources, err := linkedEventSources(ctx, eventBus.Namespace, eventBus.Name, client) if err != nil { logger.Errorw("failed to query linked EventSources", zap.Error(err)) - return errors.Wrap(err, "failed to check if there is any EventSource linked") + return fmt.Errorf("failed to check if there is any EventSource linked, %w", err) } if linkedEventSources > 0 { - return errors.Errorf("Can not delete an EventBus with %v EventSources connected", linkedEventSources) + return fmt.Errorf("can not delete an EventBus with %v EventSources connected", linkedEventSources) } linkedSensors, err := linkedSensors(ctx, eventBus.Namespace, eventBus.Name, client) if err != nil { logger.Errorw("failed to query linked Sensors", zap.Error(err)) - return errors.Wrap(err, "failed to check if there is any Sensor linked") + return fmt.Errorf("failed to check if there is any Sensor linked, %w", err) } if linkedSensors > 0 { - return errors.Errorf("Can not delete an EventBus with %v Sensors connected", linkedSensors) + return fmt.Errorf("can not delete an EventBus with %v Sensors connected", linkedSensors) } - installer, err := getInstaller(eventBus, client, natsStreamingImage, natsMetricsImage, logger) + installer, err := getInstaller(eventBus, client, kubeClient, config, logger) if err != nil { logger.Errorw("failed to get an installer", zap.Error(err)) return err diff --git a/controllers/eventbus/installer/installer_test.go b/controllers/eventbus/installer/installer_test.go index 1504b86dfd..ece0f1af3c 100644 --- a/controllers/eventbus/installer/installer_test.go +++ b/controllers/eventbus/installer/installer_test.go @@ -5,31 +5,78 @@ import ( "testing" "github.com/stretchr/testify/assert" + "go.uber.org/zap/zaptest" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8sfake "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" - "github.com/argoproj/argo-events/common/logging" + "github.com/argoproj/argo-events/controllers" eventsourcev1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" sensorv1alpha1 "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" ) +const ( + testJetStreamImage = "test-js-image" + testJSReloaderImage = "test-nats-rl-image" + testJetStreamExporterImage = "test-js-e-image" +) + +var ( + fakeConfig = &controllers.GlobalConfig{ + EventBus: &controllers.EventBusConfig{ + NATS: &controllers.StanConfig{ + Versions: []controllers.StanVersion{ + { + Version: "0.22.1", + NATSStreamingImage: "test-n-s-image", + MetricsExporterImage: "test-n-s-m-image", + }, + }, + }, + JetStream: &controllers.JetStreamConfig{ + Versions: []controllers.JetStreamVersion{ + { + Version: "2.7.3", + NatsImage: testJetStreamImage, + ConfigReloaderImage: testJSReloaderImage, + MetricsExporterImage: testJetStreamExporterImage, + }, + }, + }, + }, + } +) + func TestGetInstaller(t *testing.T) { t.Run("get installer", func(t *testing.T) { - installer, err := getInstaller(testEventBus, nil, "", "", logging.NewArgoEventsLogger()) + installer, err := 
getInstaller(testNatsEventBus, nil, nil, fakeConfig, zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) assert.NotNil(t, installer) _, ok := installer.(*natsInstaller) assert.True(t, ok) - installer, err = getInstaller(testExoticBus, nil, "", "", logging.NewArgoEventsLogger()) + installer, err = getInstaller(testNatsExoticBus, nil, nil, fakeConfig, zaptest.NewLogger(t).Sugar()) assert.NoError(t, err) assert.NotNil(t, installer) _, ok = installer.(*exoticNATSInstaller) assert.True(t, ok) }) + + t.Run("get jetstream installer", func(t *testing.T) { + installer, err := getInstaller(testJetStreamEventBus, nil, nil, fakeConfig, zaptest.NewLogger(t).Sugar()) + assert.NoError(t, err) + assert.NotNil(t, installer) + _, ok := installer.(*jetStreamInstaller) + assert.True(t, ok) + + installer, err = getInstaller(testJetStreamExoticBus, nil, nil, fakeConfig, zaptest.NewLogger(t).Sugar()) + assert.NoError(t, err) + assert.NotNil(t, installer) + _, ok = installer.(*exoticJetStreamInstaller) + assert.True(t, ok) + }) } func init() { @@ -87,23 +134,18 @@ func fakeSensor() *sensorv1alpha1.Sensor { Namespace: testNamespace, }, Spec: sensorv1alpha1.SensorSpec{ - Triggers: []v1alpha1.Trigger{ + Triggers: []sensorv1alpha1.Trigger{ { - Template: &v1alpha1.TriggerTemplate{ + Template: &sensorv1alpha1.TriggerTemplate{ Name: "fake-trigger", - K8s: &v1alpha1.StandardK8STrigger{ - GroupVersionResource: metav1.GroupVersionResource{ - Group: "k8s.io", - Version: "", - Resource: "pods", - }, + K8s: &sensorv1alpha1.StandardK8STrigger{ Operation: "create", - Source: &v1alpha1.ArtifactLocation{}, + Source: &sensorv1alpha1.ArtifactLocation{}, }, }, }, }, - Dependencies: []v1alpha1.EventDependency{ + Dependencies: []sensorv1alpha1.EventDependency{ { Name: "fake-dep", EventSourceName: "fake-source", @@ -113,3 +155,46 @@ func fakeSensor() *sensorv1alpha1.Sensor { }, } } + +func TestInstall(t *testing.T) { + kubeClient := k8sfake.NewSimpleClientset() + cl := fake.NewClientBuilder().Build() + ctx := context.TODO() + + t.Run("test nats error", func(t *testing.T) { + testObj := testNatsEventBus.DeepCopy() + testObj.Spec.NATS = nil + err := Install(ctx, testObj, cl, kubeClient, fakeConfig, zaptest.NewLogger(t).Sugar()) + assert.Error(t, err) + assert.Equal(t, "invalid eventbus spec", err.Error()) + }) + + t.Run("test nats install ok", func(t *testing.T) { + testObj := testNatsEventBus.DeepCopy() + err := Install(ctx, testObj, cl, kubeClient, fakeConfig, zaptest.NewLogger(t).Sugar()) + assert.NoError(t, err) + assert.True(t, testObj.Status.IsReady()) + assert.NotNil(t, testObj.Status.Config.NATS) + assert.NotEmpty(t, testObj.Status.Config.NATS.URL) + assert.NotNil(t, testObj.Status.Config.NATS.Auth) + assert.NotNil(t, testObj.Status.Config.NATS.AccessSecret) + }) + + t.Run("test jetstream error", func(t *testing.T) { + testObj := testJetStreamEventBus.DeepCopy() + testObj.Spec.JetStream = nil + err := Install(ctx, testObj, cl, kubeClient, fakeConfig, zaptest.NewLogger(t).Sugar()) + assert.Error(t, err) + assert.Equal(t, "invalid eventbus spec", err.Error()) + }) + + t.Run("test jetstream install ok", func(t *testing.T) { + testObj := testJetStreamEventBus.DeepCopy() + err := Install(ctx, testObj, cl, kubeClient, fakeConfig, zaptest.NewLogger(t).Sugar()) + assert.NoError(t, err) + assert.True(t, testObj.Status.IsReady()) + assert.NotNil(t, testObj.Status.Config.JetStream) + assert.NotEmpty(t, testObj.Status.Config.JetStream.URL) + assert.NotNil(t, testObj.Status.Config.JetStream.AccessSecret) + }) +} diff --git 
a/controllers/eventbus/installer/jetstream.go b/controllers/eventbus/installer/jetstream.go new file mode 100644 index 0000000000..0d97d88584 --- /dev/null +++ b/controllers/eventbus/installer/jetstream.go @@ -0,0 +1,790 @@ +package installer + +import ( + "bytes" + "context" + "embed" + "fmt" + "strconv" + "strings" + "text/template" + "time" + + "github.com/spf13/viper" + "go.uber.org/zap" + appv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + apiresource "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/kubernetes" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" + + "github.com/argoproj/argo-events/common" + "github.com/argoproj/argo-events/common/tls" + "github.com/argoproj/argo-events/controllers" + "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" +) + +const ( + jsClientPort = int32(4222) + jsClusterPort = int32(6222) + jsMonitorPort = int32(8222) + jsMetricsPort = int32(7777) +) + +var ( + //go:embed assets/jetstream/* + jetStremAssets embed.FS +) + +const ( + secretServerKeyPEMFile = "server-key.pem" + secretServerCertPEMFile = "server-cert.pem" + secretCACertPEMFile = "ca-cert.pem" + + secretClusterKeyPEMFile = "cluster-server-key.pem" + secretClusterCertPEMFile = "cluster-server-cert.pem" + secretClusterCACertPEMFile = "cluster-ca-cert.pem" + + certOrg = "io.argoproj" +) + +type jetStreamInstaller struct { + client client.Client + eventBus *v1alpha1.EventBus + kubeClient kubernetes.Interface + config *controllers.GlobalConfig + labels map[string]string + logger *zap.SugaredLogger +} + +func NewJetStreamInstaller(client client.Client, eventBus *v1alpha1.EventBus, config *controllers.GlobalConfig, labels map[string]string, kubeClient kubernetes.Interface, logger *zap.SugaredLogger) Installer { + return &jetStreamInstaller{ + client: client, + kubeClient: kubeClient, + eventBus: eventBus, + config: config, + labels: labels, + logger: logger.With("eventbus", eventBus.Name), + } +} + +func (r *jetStreamInstaller) Install(ctx context.Context) (*v1alpha1.BusConfig, error) { + if js := r.eventBus.Spec.JetStream; js == nil { + return nil, fmt.Errorf("invalid jetstream eventbus spec") + } + // Merge the global stream config with any per-eventbus overrides + v := viper.New() + v.SetConfigType("yaml") + if err := v.ReadConfig(bytes.NewBufferString(r.config.EventBus.JetStream.StreamConfig)); err != nil { + return nil, fmt.Errorf("invalid jetstream config in global configuration, %w", err) + } + if x := r.eventBus.Spec.JetStream.StreamConfig; x != nil { + if err := v.MergeConfig(bytes.NewBufferString(*x)); err != nil { + return nil, fmt.Errorf("failed to merge customized stream config, %w", err) + } + } + b, err := yaml.Marshal(v.AllSettings()) + if err != nil { + return nil, fmt.Errorf("failed to marshal merged stream config, %w", err) + } + + if err := r.createSecrets(ctx); err != nil { + r.logger.Errorw("failed to create jetstream auth secrets", zap.Error(err)) + r.eventBus.Status.MarkDeployFailed("JetStreamAuthSecretsFailed", err.Error()) + return nil, err + } + if err := r.createConfigMap(ctx); err != nil { + r.logger.Errorw("failed to create jetstream ConfigMap", zap.Error(err)) + r.eventBus.Status.MarkDeployFailed("JetStreamConfigMapFailed", err.Error()) + return nil, err + } + if err := r.createService(ctx); err != nil { + r.logger.Errorw("failed to create jetstream Service", 
zap.Error(err)) + r.eventBus.Status.MarkDeployFailed("JetStreamServiceFailed", err.Error()) + return nil, err + } + if err := r.createStatefulSet(ctx); err != nil { + r.logger.Errorw("failed to create jetstream StatefulSet", zap.Error(err)) + r.eventBus.Status.MarkDeployFailed("JetStreamStatefulSetFailed", err.Error()) + return nil, err + } + r.eventBus.Status.MarkDeployed("Succeeded", "JetStream is deployed") + return &v1alpha1.BusConfig{ + JetStream: &v1alpha1.JetStreamConfig{ + URL: fmt.Sprintf("nats://%s.%s.svc:%s", generateJetStreamServiceName(r.eventBus), r.eventBus.Namespace, strconv.Itoa(int(jsClientPort))), + AccessSecret: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: generateJetStreamClientAuthSecretName(r.eventBus), + }, + Key: common.JetStreamClientAuthSecretKey, + }, + StreamConfig: string(b), + }, + }, nil +} + +// buildJetStreamServiceSpec builds the Service spec for JetStream +func (r *jetStreamInstaller) buildJetStreamServiceSpec() corev1.ServiceSpec { + return corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + {Name: "tcp-client", Port: jsClientPort}, + {Name: "cluster", Port: jsClusterPort}, + {Name: "metrics", Port: jsMetricsPort}, + {Name: "monitor", Port: jsMonitorPort}, + }, + Type: corev1.ServiceTypeClusterIP, + ClusterIP: corev1.ClusterIPNone, + PublishNotReadyAddresses: true, + Selector: r.labels, + } +} + +func (r *jetStreamInstaller) createService(ctx context.Context) error { + spec := r.buildJetStreamServiceSpec() + hash := common.MustHash(spec) + obj := &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: r.eventBus.Namespace, + Name: generateJetStreamServiceName(r.eventBus), + Labels: r.labels, + Annotations: map[string]string{ + common.AnnotationResourceSpecHash: hash, + }, + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(r.eventBus.GetObjectMeta(), v1alpha1.SchemaGroupVersionKind), + }, + }, + Spec: spec, + } + old := &corev1.Service{} + if err := r.client.Get(ctx, client.ObjectKeyFromObject(obj), old); err != nil { + if apierrors.IsNotFound(err) { + if err := r.client.Create(ctx, obj); err != nil { + return fmt.Errorf("failed to create jetstream service, err: %w", err) + } + r.logger.Info("created jetstream service successfully") + return nil + } else { + return fmt.Errorf("failed to check if jetstream service is existing, err: %w", err) + } + } + if old.GetAnnotations()[common.AnnotationResourceSpecHash] != hash { + old.Annotations[common.AnnotationResourceSpecHash] = hash + old.Spec = spec + if err := r.client.Update(ctx, old); err != nil { + return fmt.Errorf("failed to update jetstream service, err: %w", err) + } + r.logger.Info("updated jetstream service successfully") + } + return nil +} + +func (r *jetStreamInstaller) createStatefulSet(ctx context.Context) error { + jsVersion, err := r.config.GetJetStreamVersion(r.eventBus.Spec.JetStream.Version) + if err != nil { + return fmt.Errorf("failed to get jetstream version, err: %w", err) + } + spec := r.buildStatefulSetSpec(jsVersion) + hash := common.MustHash(spec) + obj := &appv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: r.eventBus.Namespace, + Name: generateJetStreamStatefulSetName(r.eventBus), + Labels: r.mergeEventBusLabels(r.labels), + Annotations: map[string]string{ + common.AnnotationResourceSpecHash: hash, + }, + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(r.eventBus.GetObjectMeta(), v1alpha1.SchemaGroupVersionKind), + }, + }, + Spec: spec, + } + old := &appv1.StatefulSet{} + if err := 
r.client.Get(ctx, client.ObjectKeyFromObject(obj), old); err != nil { + if apierrors.IsNotFound(err) { + if err := r.client.Create(ctx, obj); err != nil { + return fmt.Errorf("failed to create jetstream statefulset, err: %w", err) + } + r.logger.Info("created jetstream statefulset successfully") + return nil + } else { + return fmt.Errorf("failed to check if jetstream statefulset is existing, err: %w", err) + } + } + if old.GetAnnotations()[common.AnnotationResourceSpecHash] != hash { + old.Annotations[common.AnnotationResourceSpecHash] = hash + old.Spec = spec + if err := r.client.Update(ctx, old); err != nil { + return fmt.Errorf("failed to update jetstream statefulset, err: %w", err) + } + r.logger.Info("updated jetstream statefulset successfully") + } + return nil +} + +func (r *jetStreamInstaller) buildStatefulSetSpec(jsVersion *controllers.JetStreamVersion) appv1.StatefulSetSpec { + js := r.eventBus.Spec.JetStream + replicas := int32(js.GetReplicas()) + podTemplateLabels := make(map[string]string) + if js.Metadata != nil && + len(js.Metadata.Labels) > 0 { + for k, v := range js.Metadata.Labels { + podTemplateLabels[k] = v + } + } + for k, v := range r.labels { + podTemplateLabels[k] = v + } + var jsContainerPullPolicy, reloaderContainerPullPolicy, metricsContainerPullPolicy corev1.PullPolicy + var jsContainerSecurityContext, reloaderContainerSecurityContext, metricsContainerSecurityContext *corev1.SecurityContext + if js.ContainerTemplate != nil { + jsContainerPullPolicy = js.ContainerTemplate.ImagePullPolicy + jsContainerSecurityContext = js.ContainerTemplate.SecurityContext + } + if js.ReloaderContainerTemplate != nil { + reloaderContainerPullPolicy = js.ReloaderContainerTemplate.ImagePullPolicy + reloaderContainerSecurityContext = js.ReloaderContainerTemplate.SecurityContext + } + if js.MetricsContainerTemplate != nil { + metricsContainerPullPolicy = js.MetricsContainerTemplate.ImagePullPolicy + metricsContainerSecurityContext = js.MetricsContainerTemplate.SecurityContext + } + shareProcessNamespace := true + terminationGracePeriodSeconds := int64(60) + spec := appv1.StatefulSetSpec{ + PodManagementPolicy: appv1.ParallelPodManagement, + Replicas: &replicas, + ServiceName: generateJetStreamServiceName(r.eventBus), + Selector: &metav1.LabelSelector{ + MatchLabels: r.labels, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: podTemplateLabels, + }, + Spec: corev1.PodSpec{ + NodeSelector: js.NodeSelector, + Tolerations: js.Tolerations, + SecurityContext: js.SecurityContext, + ImagePullSecrets: js.ImagePullSecrets, + PriorityClassName: js.PriorityClassName, + Priority: js.Priority, + ServiceAccountName: js.ServiceAccountName, + Affinity: js.Affinity, + ShareProcessNamespace: &shareProcessNamespace, + TerminationGracePeriodSeconds: &terminationGracePeriodSeconds, + Volumes: []corev1.Volume{ + {Name: "pid", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}, + { + Name: "config-volume", + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{ + { + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: generateJetStreamConfigMapName(r.eventBus), + }, + Items: []corev1.KeyToPath{ + { + Key: common.JetStreamConfigMapKey, + Path: "nats-js.conf", + }, + }, + }, + }, + { + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: generateJetStreamServerSecretName(r.eventBus), + }, + Items: 
[]corev1.KeyToPath{ + { + Key: common.JetStreamServerSecretAuthKey, + Path: "auth.conf", + }, + { + Key: common.JetStreamServerPrivateKeyKey, + Path: secretServerKeyPEMFile, + }, + { + Key: common.JetStreamServerCertKey, + Path: secretServerCertPEMFile, + }, + { + Key: common.JetStreamServerCACertKey, + Path: secretCACertPEMFile, + }, + { + Key: common.JetStreamClusterPrivateKeyKey, + Path: secretClusterKeyPEMFile, + }, + { + Key: common.JetStreamClusterCertKey, + Path: secretClusterCertPEMFile, + }, + { + Key: common.JetStreamClusterCACertKey, + Path: secretClusterCACertPEMFile, + }, + }, + }, + }, + }, + }, + }, + }, + }, + Containers: []corev1.Container{ + { + Name: "main", + Image: jsVersion.NatsImage, + ImagePullPolicy: jsContainerPullPolicy, + Ports: []corev1.ContainerPort{ + {Name: "client", ContainerPort: jsClientPort}, + {Name: "cluster", ContainerPort: jsClusterPort}, + {Name: "monitor", ContainerPort: jsMonitorPort}, + }, + Command: []string{jsVersion.StartCommand, "--config", "/etc/nats-config/nats-js.conf"}, + Args: js.StartArgs, + Env: []corev1.EnvVar{ + {Name: "POD_NAME", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"}}}, + {Name: "SERVER_NAME", Value: "$(POD_NAME)"}, + {Name: "POD_NAMESPACE", ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.namespace"}}}, + {Name: "CLUSTER_ADVERTISE", Value: "$(POD_NAME)." + generateJetStreamServiceName(r.eventBus) + ".$(POD_NAMESPACE).svc"}, + {Name: "JS_KEY", ValueFrom: &corev1.EnvVarSource{SecretKeyRef: &corev1.SecretKeySelector{LocalObjectReference: corev1.LocalObjectReference{Name: generateJetStreamServerSecretName(r.eventBus)}, Key: common.JetStreamServerSecretEncryptionKey}}}, + }, + VolumeMounts: []corev1.VolumeMount{ + {Name: "config-volume", MountPath: "/etc/nats-config"}, + {Name: "pid", MountPath: "/var/run/nats"}, + }, + SecurityContext: jsContainerSecurityContext, + StartupProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/healthz", + Port: intstr.FromInt(int(jsMonitorPort)), + }, + }, + FailureThreshold: 30, + InitialDelaySeconds: 10, + TimeoutSeconds: 5, + }, + LivenessProbe: &corev1.Probe{ + ProbeHandler: corev1.ProbeHandler{ + HTTPGet: &corev1.HTTPGetAction{ + Path: "/", + Port: intstr.FromInt(int(jsMonitorPort)), + }, + }, + InitialDelaySeconds: 10, + PeriodSeconds: 30, + TimeoutSeconds: 5, + }, + Lifecycle: &corev1.Lifecycle{ + PreStop: &corev1.LifecycleHandler{ + Exec: &corev1.ExecAction{ + Command: []string{jsVersion.StartCommand, "-sl=ldm=/var/run/nats/nats.pid"}, + }, + }, + }, + }, + { + Name: "reloader", + Image: jsVersion.ConfigReloaderImage, + ImagePullPolicy: reloaderContainerPullPolicy, + SecurityContext: reloaderContainerSecurityContext, + Args: []string{"-pid", "/var/run/nats/nats.pid", "-config", "/etc/nats-config/nats-js.conf"}, + VolumeMounts: []corev1.VolumeMount{ + {Name: "config-volume", MountPath: "/etc/nats-config"}, + {Name: "pid", MountPath: "/var/run/nats"}, + }, + }, + { + Name: "metrics", + Image: jsVersion.MetricsExporterImage, + ImagePullPolicy: metricsContainerPullPolicy, + Ports: []corev1.ContainerPort{ + {Name: "metrics", ContainerPort: jsMetricsPort}, + }, + Args: []string{"-connz", "-routez", "-subz", "-varz", "-prefix=nats", "-use_internal_server_id", "-jsz=all", fmt.Sprintf("http://localhost:%s", strconv.Itoa(int(jsMonitorPort)))}, + SecurityContext: metricsContainerSecurityContext, + }, + }, + }, + }, + } + if js.Metadata != nil { + 
spec.Template.SetAnnotations(js.Metadata.Annotations) + } + + podContainers := spec.Template.Spec.Containers + containers := map[string]*corev1.Container{} + for idx := range podContainers { + containers[podContainers[idx].Name] = &podContainers[idx] + } + + if js.ContainerTemplate != nil { + containers["main"].Resources = js.ContainerTemplate.Resources + } + + if js.MetricsContainerTemplate != nil { + containers["metrics"].Resources = js.MetricsContainerTemplate.Resources + } + + if js.ReloaderContainerTemplate != nil { + containers["reloader"].Resources = js.ReloaderContainerTemplate.Resources + } + + if js.Persistence != nil { + volMode := corev1.PersistentVolumeFilesystem + // Default volume size + volSize := apiresource.MustParse("20Gi") + if js.Persistence.VolumeSize != nil { + volSize = *js.Persistence.VolumeSize + } + // Default to ReadWriteOnce + accessMode := corev1.ReadWriteOnce + if js.Persistence.AccessMode != nil { + accessMode = *js.Persistence.AccessMode + } + spec.VolumeClaimTemplates = []corev1.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: generateJetStreamPVCName(r.eventBus), + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + accessMode, + }, + VolumeMode: &volMode, + StorageClassName: js.Persistence.StorageClassName, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: volSize, + }, + }, + }, + }, + } + volumeMounts := spec.Template.Spec.Containers[0].VolumeMounts + volumeMounts = append(volumeMounts, corev1.VolumeMount{Name: generateJetStreamPVCName(r.eventBus), MountPath: "/data/jetstream"}) + spec.Template.Spec.Containers[0].VolumeMounts = volumeMounts + } else { + // When the POD is runasnonroot, it can not create the dir /data/jetstream + // Use an emptyDirVolume + emptyDirVolName := "js-data" + volumes := spec.Template.Spec.Volumes + volumes = append(volumes, corev1.Volume{Name: emptyDirVolName, VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}}) + spec.Template.Spec.Volumes = volumes + volumeMounts := spec.Template.Spec.Containers[0].VolumeMounts + volumeMounts = append(volumeMounts, corev1.VolumeMount{Name: emptyDirVolName, MountPath: "/data/jetstream"}) + spec.Template.Spec.Containers[0].VolumeMounts = volumeMounts + } + return spec +} + +func (r *jetStreamInstaller) getSecret(ctx context.Context, name string) (*corev1.Secret, error) { + sl, err := r.kubeClient.CoreV1().Secrets(r.eventBus.Namespace).List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + for _, s := range sl.Items { + if s.Name == name && metav1.IsControlledBy(&s, r.eventBus) { + return &s, nil + } + } + return nil, apierrors.NewNotFound(schema.GroupResource{}, "") +} + +func (r *jetStreamInstaller) createSecrets(ctx context.Context) error { + // first check to see if the secrets already exist + oldServerObjExisting, oldClientObjExisting := true, true + + oldSObj, err := r.getSecret(ctx, generateJetStreamServerSecretName(r.eventBus)) + if err != nil { + if apierrors.IsNotFound(err) { + oldServerObjExisting = false + } else { + return fmt.Errorf("failed to check if nats server auth secret is existing, err: %w", err) + } + } + + oldCObj, err := r.getSecret(ctx, generateJetStreamClientAuthSecretName(r.eventBus)) + if err != nil { + if apierrors.IsNotFound(err) { + oldClientObjExisting = false + } else { + return fmt.Errorf("failed to check if nats client auth secret is existing, err: %w", err) + } + } + + if !oldClientObjExisting 
|| !oldServerObjExisting { + // Generate server-auth.conf file + encryptionKey := common.RandomString(12) + jsUser := common.RandomString(8) + jsPass := common.RandomString(16) + sysPassword := common.RandomString(24) + authTpl := template.Must(template.ParseFS(jetStremAssets, "assets/jetstream/server-auth.conf")) + var authTplOutput bytes.Buffer + if err := authTpl.Execute(&authTplOutput, struct { + JetStreamUser string + JetStreamPassword string + SysPassword string + }{ + JetStreamUser: jsUser, + JetStreamPassword: jsPass, + SysPassword: sysPassword, + }); err != nil { + return fmt.Errorf("failed to parse nats auth template, error: %w", err) + } + + // Generate TLS self signed certificate for Jetstream bus: includes TLS private key, certificate, and CA certificate + hosts := []string{} + hosts = append(hosts, fmt.Sprintf("%s.%s.svc.cluster.local", generateJetStreamServiceName(r.eventBus), r.eventBus.Namespace)) // todo: get an error in the log file related to this: do we need it? + hosts = append(hosts, fmt.Sprintf("%s.%s.svc", generateJetStreamServiceName(r.eventBus), r.eventBus.Namespace)) + + serverKeyPEM, serverCertPEM, caCertPEM, err := tls.CreateCerts(certOrg, hosts, time.Now().Add(10*365*24*time.Hour), true, false) // expires in 10 years + if err != nil { + return err + } + + // Generate TLS self signed certificate for Jetstream cluster nodes: includes TLS private key, certificate, and CA certificate + clusterNodeHosts := []string{ + fmt.Sprintf("*.%s.%s.svc.cluster.local", generateJetStreamServiceName(r.eventBus), r.eventBus.Namespace), + fmt.Sprintf("*.%s.%s.svc", generateJetStreamServiceName(r.eventBus), r.eventBus.Namespace), + } + r.logger.Infof("cluster node hosts: %+v", clusterNodeHosts) + clusterKeyPEM, clusterCertPEM, clusterCACertPEM, err := tls.CreateCerts(certOrg, clusterNodeHosts, time.Now().Add(10*365*24*time.Hour), true, true) // expires in 10 years + if err != nil { + return err + } + + serverObj := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: r.eventBus.Namespace, + Name: generateJetStreamServerSecretName(r.eventBus), + Labels: r.labels, + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(r.eventBus.GetObjectMeta(), v1alpha1.SchemaGroupVersionKind), + }, + }, + Type: corev1.SecretTypeOpaque, + Data: map[string][]byte{ + common.JetStreamServerSecretAuthKey: authTplOutput.Bytes(), + common.JetStreamServerSecretEncryptionKey: []byte(encryptionKey), + common.JetStreamServerPrivateKeyKey: serverKeyPEM, + common.JetStreamServerCertKey: serverCertPEM, + common.JetStreamServerCACertKey: caCertPEM, + common.JetStreamClusterPrivateKeyKey: clusterKeyPEM, + common.JetStreamClusterCertKey: clusterCertPEM, + common.JetStreamClusterCACertKey: clusterCACertPEM, + }, + } + + clientAuthObj := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: r.eventBus.Namespace, + Name: generateJetStreamClientAuthSecretName(r.eventBus), + Labels: r.labels, + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(r.eventBus.GetObjectMeta(), v1alpha1.SchemaGroupVersionKind), + }, + }, + Type: corev1.SecretTypeOpaque, + Data: map[string][]byte{ + common.JetStreamClientAuthSecretKey: []byte(fmt.Sprintf("username: %s\npassword: %s", jsUser, jsPass)), + }, + } + + if oldServerObjExisting { + if err := r.client.Delete(ctx, oldSObj); err != nil { + return fmt.Errorf("failed to delete malformed nats server auth secret, err: %w", err) + } + r.logger.Infow("deleted malformed nats server auth secret successfully") + } + + if oldClientObjExisting { 
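+ // Delete the stale client auth secret; it is recreated below together with the regenerated server secret so the credentials stay in sync.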
+ if err := r.client.Delete(ctx, oldCObj); err != nil { + return fmt.Errorf("failed to delete malformed nats client auth secret, err: %w", err) + } + r.logger.Infow("deleted malformed nats client auth secret successfully") + } + + if err := r.client.Create(ctx, serverObj); err != nil { + return fmt.Errorf("failed to create nats server auth secret, err: %w", err) + } + r.logger.Infow("created nats server auth secret successfully") + + if err := r.client.Create(ctx, clientAuthObj); err != nil { + return fmt.Errorf("failed to create nats client auth secret, err: %w", err) + } + r.logger.Infow("created nats client auth secret successfully") + } + + return nil +} + +func (r *jetStreamInstaller) createConfigMap(ctx context.Context) error { + data := make(map[string]string) + svcName := generateJetStreamServiceName(r.eventBus) + ssName := generateJetStreamStatefulSetName(r.eventBus) + replicas := r.eventBus.Spec.JetStream.GetReplicas() + routes := []string{} + for j := 0; j < replicas; j++ { + routes = append(routes, fmt.Sprintf("nats://%s-%s.%s.%s.svc:%s", ssName, strconv.Itoa(j), svcName, r.eventBus.Namespace, strconv.Itoa(int(jsClusterPort)))) + } + settings := r.config.EventBus.JetStream.Settings + if x := r.eventBus.Spec.JetStream.Settings; x != nil { + settings = *x + } + maxPayload := common.JetStreamMaxPayload + if r.eventBus.Spec.JetStream.MaxPayload != nil { + maxPayload = *r.eventBus.Spec.JetStream.MaxPayload + } + var confTpl *template.Template + if replicas > 2 { + confTpl = template.Must(template.ParseFS(jetStremAssets, "assets/jetstream/nats-cluster.conf")) + } else { + confTpl = template.Must(template.ParseFS(jetStremAssets, "assets/jetstream/nats.conf")) + } + var confTplOutput bytes.Buffer + if err := confTpl.Execute(&confTplOutput, struct { + MaxPayloadSize string + ClusterName string + MonitorPort string + ClusterPort string + ClientPort string + Routes string + Settings string + }{ + MaxPayloadSize: maxPayload, + ClusterName: r.eventBus.Name, + MonitorPort: strconv.Itoa(int(jsMonitorPort)), + ClusterPort: strconv.Itoa(int(jsClusterPort)), + ClientPort: strconv.Itoa(int(jsClientPort)), + Routes: strings.Join(routes, ","), + Settings: settings, + }); err != nil { + return fmt.Errorf("failed to parse nats config template, error: %w", err) + } + data[common.JetStreamConfigMapKey] = confTplOutput.String() + + hash := common.MustHash(data) + obj := &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: r.eventBus.Namespace, + Name: generateJetStreamConfigMapName(r.eventBus), + Labels: r.labels, + Annotations: map[string]string{ + common.AnnotationResourceSpecHash: hash, + }, + OwnerReferences: []metav1.OwnerReference{ + *metav1.NewControllerRef(r.eventBus.GetObjectMeta(), v1alpha1.SchemaGroupVersionKind), + }, + }, + Data: data, + } + old := &corev1.ConfigMap{} + if err := r.client.Get(ctx, client.ObjectKeyFromObject(obj), old); err != nil { + if apierrors.IsNotFound(err) { + if err := r.client.Create(ctx, obj); err != nil { + return fmt.Errorf("failed to create jetstream configmap, err: %w", err) + } + r.logger.Info("created jetstream configmap successfully") + return nil + } else { + return fmt.Errorf("failed to check if jetstream configmap is existing, err: %w", err) + } + } + if old.GetAnnotations()[common.AnnotationResourceSpecHash] != hash { + old.Annotations[common.AnnotationResourceSpecHash] = hash + old.Data = data + if err := r.client.Update(ctx, old); err != nil { + return fmt.Errorf("failed to update jetstream configmap, err: %w", err) + } + 
+
+func (r *jetStreamInstaller) Uninstall(ctx context.Context) error {
+	return r.uninstallPVCs(ctx)
+}
+
+func (r *jetStreamInstaller) uninstallPVCs(ctx context.Context) error {
+	// A StatefulSet does not clean up the PVCs it creates, so they need to be
+	// deleted separately:
+	// https://github.com/kubernetes/kubernetes/issues/55045
+	pvcs, err := r.getPVCs(ctx)
+	if err != nil {
+		r.logger.Errorw("failed to get PVCs created by Nats statefulset when uninstalling", zap.Error(err))
+		return err
+	}
+	for _, pvc := range pvcs {
+		err = r.client.Delete(ctx, &pvc)
+		if err != nil {
+			r.logger.Errorw("failed to delete pvc when uninstalling", zap.Any("pvcName", pvc.Name), zap.Error(err))
+			return err
+		}
+		r.logger.Infow("pvc deleted", "pvcName", pvc.Name)
+	}
+	return nil
+}
+
+// get the PVCs created by the streaming statefulset;
+// they carry the same labels as the statefulset
+func (r *jetStreamInstaller) getPVCs(ctx context.Context) ([]corev1.PersistentVolumeClaim, error) {
+	pvcl := &corev1.PersistentVolumeClaimList{}
+	err := r.client.List(ctx, pvcl, &client.ListOptions{
+		Namespace:     r.eventBus.Namespace,
+		LabelSelector: labels.SelectorFromSet(r.labels),
+	})
+	if err != nil {
+		return nil, err
+	}
+	return pvcl.Items, nil
+}
+
+func generateJetStreamServerSecretName(eventBus *v1alpha1.EventBus) string {
+	return fmt.Sprintf("eventbus-%s-js-server", eventBus.Name)
+}
+
+func (r *jetStreamInstaller) mergeEventBusLabels(given map[string]string) map[string]string {
+	result := map[string]string{}
+	if r.eventBus.Labels != nil {
+		for k, v := range r.eventBus.Labels {
+			result[k] = v
+		}
+	}
+	for k, v := range given {
+		result[k] = v
+	}
+	return result
+}
+
+func generateJetStreamClientAuthSecretName(eventBus *v1alpha1.EventBus) string {
+	return fmt.Sprintf("eventbus-%s-js-client-auth", eventBus.Name)
+}
+
+func generateJetStreamServiceName(eventBus *v1alpha1.EventBus) string {
+	return fmt.Sprintf("eventbus-%s-js-svc", eventBus.Name)
+}
+
+func generateJetStreamStatefulSetName(eventBus *v1alpha1.EventBus) string {
+	return fmt.Sprintf("eventbus-%s-js", eventBus.Name)
+}
+
+func generateJetStreamConfigMapName(eventBus *v1alpha1.EventBus) string {
+	return fmt.Sprintf("eventbus-%s-js-config", eventBus.Name)
+}
+
+func generateJetStreamPVCName(eventBus *v1alpha1.EventBus) string {
+	return fmt.Sprintf("eventbus-%s-js-vol", eventBus.Name)
+}
diff --git a/controllers/eventbus/installer/jetstream_test.go b/controllers/eventbus/installer/jetstream_test.go
new file mode 100644
index 0000000000..dfc83fa318
--- /dev/null
+++ b/controllers/eventbus/installer/jetstream_test.go
@@ -0,0 +1,285 @@
+package installer
+
+import (
+	"context"
+
+	"testing"
+
+	"github.com/argoproj/argo-events/common"
+	"github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1"
+	"github.com/stretchr/testify/assert"
+	"go.uber.org/zap/zaptest"
+	appv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	apiresource "k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	k8sfake "k8s.io/client-go/kubernetes/fake"
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"
+)
+
+var (
+	testJetStreamEventBus = &v1alpha1.EventBus{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: v1alpha1.SchemeGroupVersion.String(),
+			Kind:       "EventBus",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: testNamespace,
+			Name:      testName,
+		},
+		Spec: v1alpha1.EventBusSpec{
+			JetStream: &v1alpha1.JetStreamBus{
+				Version: "2.7.3",
+			},
+		},
+	}
+
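+	// testJetStreamExoticBus below models an "exotic" JetStream bus, i.e. one
+	// that is managed outside the cluster: only a client URL is provided, and
+	// the controller deploys no server components for it.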
testJetStreamExoticBus = &v1alpha1.EventBus{ + TypeMeta: metav1.TypeMeta{ + APIVersion: v1alpha1.SchemeGroupVersion.String(), + Kind: "EventBus", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: testNamespace, + Name: testName, + }, + Spec: v1alpha1.EventBusSpec{ + JetStreamExotic: &v1alpha1.JetStreamConfig{ + URL: "nats://nats:4222", + }, + }, + } +) + +func TestJetStreamBadInstallation(t *testing.T) { + t.Run("bad installation", func(t *testing.T) { + badEventBus := testJetStreamEventBus.DeepCopy() + badEventBus.Spec.JetStream = nil + installer := &jetStreamInstaller{ + client: fake.NewClientBuilder().Build(), + kubeClient: k8sfake.NewSimpleClientset(), + eventBus: badEventBus, + config: fakeConfig, + labels: testLabels, + logger: zaptest.NewLogger(t).Sugar(), + } + _, err := installer.Install(context.TODO()) + assert.Error(t, err) + }) +} + +func TestJetStreamGenerateNames(t *testing.T) { + n := generateJetStreamStatefulSetName(testJetStreamEventBus) + assert.Equal(t, "eventbus-"+testJetStreamEventBus.Name+"-js", n) + n = generateJetStreamServerSecretName(testJetStreamEventBus) + assert.Equal(t, "eventbus-"+testJetStreamEventBus.Name+"-js-server", n) + n = generateJetStreamClientAuthSecretName(testJetStreamEventBus) + assert.Equal(t, "eventbus-"+testJetStreamEventBus.Name+"-js-client-auth", n) + n = generateJetStreamConfigMapName(testJetStreamEventBus) + assert.Equal(t, "eventbus-"+testJetStreamEventBus.Name+"-js-config", n) + n = generateJetStreamPVCName(testJetStreamEventBus) + assert.Equal(t, "eventbus-"+testJetStreamEventBus.Name+"-js-vol", n) + n = generateJetStreamServiceName(testJetStreamEventBus) + assert.Equal(t, "eventbus-"+testJetStreamEventBus.Name+"-js-svc", n) +} + +func TestJetStreamCreateObjects(t *testing.T) { + cl := fake.NewClientBuilder().Build() + ctx := context.TODO() + i := &jetStreamInstaller{ + client: cl, + kubeClient: k8sfake.NewSimpleClientset(), + eventBus: testJetStreamEventBus, + config: fakeConfig, + labels: testLabels, + logger: zaptest.NewLogger(t).Sugar(), + } + + t.Run("test create sts", func(t *testing.T) { + testObj := testJetStreamEventBus.DeepCopy() + i.eventBus = testObj + err := i.createStatefulSet(ctx) + assert.NoError(t, err) + sts := &appv1.StatefulSet{} + err = cl.Get(ctx, types.NamespacedName{Namespace: testObj.Namespace, Name: generateJetStreamStatefulSetName(testObj)}, sts) + assert.NoError(t, err) + assert.Equal(t, 3, len(sts.Spec.Template.Spec.Containers)) + assert.Contains(t, sts.Annotations, common.AnnotationResourceSpecHash) + assert.Equal(t, testJetStreamImage, sts.Spec.Template.Spec.Containers[0].Image) + assert.Equal(t, testJSReloaderImage, sts.Spec.Template.Spec.Containers[1].Image) + assert.Equal(t, testJetStreamExporterImage, sts.Spec.Template.Spec.Containers[2].Image) + assert.True(t, len(sts.Spec.Template.Spec.Volumes) > 1) + envNames := []string{} + for _, e := range sts.Spec.Template.Spec.Containers[0].Env { + envNames = append(envNames, e.Name) + } + for _, e := range []string{"POD_NAME", "SERVER_NAME", "POD_NAMESPACE", "CLUSTER_ADVERTISE", "JS_KEY"} { + assert.Contains(t, envNames, e) + } + }) + + t.Run("test create svc", func(t *testing.T) { + testObj := testJetStreamEventBus.DeepCopy() + i.eventBus = testObj + err := i.createService(ctx) + assert.NoError(t, err) + svc := &corev1.Service{} + err = cl.Get(ctx, types.NamespacedName{Namespace: testObj.Namespace, Name: generateJetStreamServiceName(testObj)}, svc) + assert.NoError(t, err) + assert.Equal(t, 4, len(svc.Spec.Ports)) + assert.Contains(t, svc.Annotations, 
common.AnnotationResourceSpecHash) + }) + + t.Run("test create auth secrets", func(t *testing.T) { + testObj := testJetStreamEventBus.DeepCopy() + i.eventBus = testObj + err := i.createSecrets(ctx) + assert.NoError(t, err) + s := &corev1.Secret{} + err = cl.Get(ctx, types.NamespacedName{Namespace: testObj.Namespace, Name: generateJetStreamServerSecretName(testObj)}, s) + assert.NoError(t, err) + assert.Equal(t, 8, len(s.Data)) + assert.Contains(t, s.Data, common.JetStreamServerSecretAuthKey) + assert.Contains(t, s.Data, common.JetStreamServerSecretEncryptionKey) + assert.Contains(t, s.Data, common.JetStreamServerPrivateKeyKey) + assert.Contains(t, s.Data, common.JetStreamServerCertKey) + assert.Contains(t, s.Data, common.JetStreamServerCACertKey) + assert.Contains(t, s.Data, common.JetStreamClusterPrivateKeyKey) + assert.Contains(t, s.Data, common.JetStreamClusterCertKey) + assert.Contains(t, s.Data, common.JetStreamClusterCACertKey) + s = &corev1.Secret{} + err = cl.Get(ctx, types.NamespacedName{Namespace: testObj.Namespace, Name: generateJetStreamClientAuthSecretName(testObj)}, s) + assert.NoError(t, err) + assert.Equal(t, 1, len(s.Data)) + assert.Contains(t, s.Data, common.JetStreamClientAuthSecretKey) + }) + + t.Run("test create configmap", func(t *testing.T) { + testObj := testJetStreamEventBus.DeepCopy() + i.eventBus = testObj + err := i.createConfigMap(ctx) + assert.NoError(t, err) + c := &corev1.ConfigMap{} + err = cl.Get(ctx, types.NamespacedName{Namespace: testObj.Namespace, Name: generateJetStreamConfigMapName(testObj)}, c) + assert.NoError(t, err) + assert.Equal(t, 1, len(c.Data)) + assert.Contains(t, c.Annotations, common.AnnotationResourceSpecHash) + }) +} + +func TestBuildJetStreamStatefulSetSpec(t *testing.T) { + cl := fake.NewClientBuilder().Build() + i := &jetStreamInstaller{ + client: cl, + eventBus: testJetStreamEventBus, + config: fakeConfig, + labels: testLabels, + logger: zaptest.NewLogger(t).Sugar(), + } + + t.Run("without persistence", func(t *testing.T) { + s := i.buildStatefulSetSpec(&fakeConfig.EventBus.JetStream.Versions[0]) + assert.Equal(t, int32(3), *s.Replicas) + assert.Equal(t, generateJetStreamServiceName(testJetStreamEventBus), s.ServiceName) + assert.Equal(t, testJetStreamImage, s.Template.Spec.Containers[0].Image) + assert.Equal(t, testJSReloaderImage, s.Template.Spec.Containers[1].Image) + assert.Equal(t, testJetStreamExporterImage, s.Template.Spec.Containers[2].Image) + assert.Equal(t, "test-controller", s.Selector.MatchLabels["controller"]) + assert.Equal(t, jsClientPort, s.Template.Spec.Containers[0].Ports[0].ContainerPort) + assert.Equal(t, jsClusterPort, s.Template.Spec.Containers[0].Ports[1].ContainerPort) + assert.Equal(t, jsMonitorPort, s.Template.Spec.Containers[0].Ports[2].ContainerPort) + assert.Equal(t, jsMetricsPort, s.Template.Spec.Containers[2].Ports[0].ContainerPort) + assert.False(t, len(s.VolumeClaimTemplates) > 0) + assert.True(t, len(s.Template.Spec.Volumes) > 0) + }) + + t.Run("with persistence", func(t *testing.T) { + st := "test" + i.eventBus.Spec.JetStream = &v1alpha1.JetStreamBus{ + Persistence: &v1alpha1.PersistenceStrategy{ + StorageClassName: &st, + }, + } + s := i.buildStatefulSetSpec(&fakeConfig.EventBus.JetStream.Versions[0]) + assert.True(t, len(s.VolumeClaimTemplates) > 0) + }) + + t.Run("with resource requests", func(t *testing.T) { + i.eventBus.Spec.JetStream = &v1alpha1.JetStreamBus{ + ContainerTemplate: &v1alpha1.ContainerTemplate{ + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + 
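+					// Note: these Quantity literals set only the Format field, not an
+					// actual value; that is sufficient here because the assertions below
+					// only check that the same structs are copied onto the containers. A
+					// real spec would normally use e.g. apiresource.MustParse("100m").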
+							corev1.ResourceCPU:    apiresource.Quantity{Format: "1"},
+							corev1.ResourceMemory: apiresource.Quantity{Format: "350Mi"},
+						},
+						Requests: corev1.ResourceList{
+							corev1.ResourceCPU:    apiresource.Quantity{Format: "1"},
+							corev1.ResourceMemory: apiresource.Quantity{Format: "400Mi"},
+						},
+					},
+				},
+
+				MetricsContainerTemplate: &v1alpha1.ContainerTemplate{
+					Resources: corev1.ResourceRequirements{
+						Limits: corev1.ResourceList{
+							corev1.ResourceCPU:    apiresource.Quantity{Format: "1"},
+							corev1.ResourceMemory: apiresource.Quantity{Format: "200Mi"},
+						},
+						Requests: corev1.ResourceList{
+							corev1.ResourceCPU:    apiresource.Quantity{Format: "1"},
+							corev1.ResourceMemory: apiresource.Quantity{Format: "200Mi"},
+						},
+					},
+				},
+
+				ReloaderContainerTemplate: &v1alpha1.ContainerTemplate{
+					Resources: corev1.ResourceRequirements{
+						Limits: corev1.ResourceList{
+							corev1.ResourceCPU:    apiresource.Quantity{Format: ".3"},
+							corev1.ResourceMemory: apiresource.Quantity{Format: "100Mi"},
+						},
+						Requests: corev1.ResourceList{
+							corev1.ResourceCPU:    apiresource.Quantity{Format: ".5"},
+							corev1.ResourceMemory: apiresource.Quantity{Format: "100Mi"},
+						},
+					},
+				},
+			}
+
+			statefulSpec := i.buildStatefulSetSpec(&fakeConfig.EventBus.JetStream.Versions[0])
+
+			podContainers := statefulSpec.Template.Spec.Containers
+			containers := map[string]*corev1.Container{}
+			for idx := range podContainers {
+				containers[podContainers[idx].Name] = &podContainers[idx]
+			}
+
+			js := i.eventBus.Spec.JetStream
+			assert.Equal(t, js.ContainerTemplate.Resources, containers["main"].Resources)
+			assert.Equal(t, js.MetricsContainerTemplate.Resources, containers["metrics"].Resources)
+			assert.Equal(t, js.ReloaderContainerTemplate.Resources, containers["reloader"].Resources)
+	})
+}
+
+func TestJetStreamGetServiceSpec(t *testing.T) {
+	cl := fake.NewClientBuilder().Build()
+	i := &jetStreamInstaller{
+		client:   cl,
+		eventBus: testJetStreamEventBus,
+		config:   fakeConfig,
+		labels:   testLabels,
+		logger:   zaptest.NewLogger(t).Sugar(),
+	}
+	spec := i.buildJetStreamServiceSpec()
+	assert.Equal(t, 4, len(spec.Ports))
+	assert.Equal(t, corev1.ClusterIPNone, spec.ClusterIP)
+}
+
+func Test_JSBufferGetReplicas(t *testing.T) {
+	s := v1alpha1.JetStreamBus{}
+	assert.Equal(t, 3, s.GetReplicas())
+	five := int32(5)
+	s.Replicas = &five
+	assert.Equal(t, 5, s.GetReplicas())
+}
diff --git a/controllers/eventbus/installer/kafka.go b/controllers/eventbus/installer/kafka.go
new file mode 100644
index 0000000000..5b0feacfd4
--- /dev/null
+++ b/controllers/eventbus/installer/kafka.go
@@ -0,0 +1,47 @@
+package installer
+
+import (
+	"context"
+	"fmt"
+
+	"go.uber.org/zap"
+
+	"github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1"
+)
+
+// exoticKafkaInstaller is an installer implementation for an exotic kafka config.
+type exoticKafkaInstaller struct {
+	eventBus *v1alpha1.EventBus
+
+	logger *zap.SugaredLogger
+}
+
+// NewExoticKafkaInstaller returns a new exoticKafkaInstaller
+func NewExoticKafkaInstaller(eventBus *v1alpha1.EventBus, logger *zap.SugaredLogger) Installer {
+	return &exoticKafkaInstaller{
+		eventBus: eventBus,
+		logger:   logger.Named("exotic-kafka"),
+	}
+}
+
+func (i *exoticKafkaInstaller) Install(ctx context.Context) (*v1alpha1.BusConfig, error) {
+	kafkaObj := i.eventBus.Spec.Kafka
+	if kafkaObj == nil {
+		return nil, fmt.Errorf("invalid request")
+	}
+	if kafkaObj.Topic == "" {
+		kafkaObj.Topic = fmt.Sprintf("%s-%s", i.eventBus.Namespace, i.eventBus.Name)
+	}
+
+	i.eventBus.Status.MarkDeployed("Skipped", "Skipped deployment because an exotic config is used.")
+	i.logger.Info("using exotic config")
+	busConfig := &v1alpha1.BusConfig{
+		Kafka: kafkaObj,
+	}
+	return busConfig, nil
+}
+
+func (i *exoticKafkaInstaller) Uninstall(ctx context.Context) error {
+	i.logger.Info("nothing to uninstall")
+	return nil
+}
diff --git a/controllers/eventbus/installer/kafka_test.go b/controllers/eventbus/installer/kafka_test.go
new file mode 100644
index 0000000000..e5a045a13a
--- /dev/null
+++ b/controllers/eventbus/installer/kafka_test.go
@@ -0,0 +1,53 @@
+package installer
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/argoproj/argo-events/common/logging"
+	"github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1"
+)
+
+const (
+	testKafkaName = "test-kafka"
+	testKafkaURL  = "kafka:9092"
+)
+
+var (
+	testKafkaExoticBus = &v1alpha1.EventBus{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: v1alpha1.SchemeGroupVersion.String(),
+			Kind:       "EventBus",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: testNamespace,
+			Name:      testKafkaName,
+		},
+		Spec: v1alpha1.EventBusSpec{
+			Kafka: &v1alpha1.KafkaBus{
+				URL: testKafkaURL,
+			},
+		},
+	}
+)
+
+func TestInstallationKafkaExotic(t *testing.T) {
+	t.Run("installation with exotic kafka config", func(t *testing.T) {
+		installer := NewExoticKafkaInstaller(testKafkaExoticBus, logging.NewArgoEventsLogger())
+		conf, err := installer.Install(context.TODO())
+		assert.NoError(t, err)
+		assert.NotNil(t, conf.Kafka)
+		assert.Equal(t, conf.Kafka.URL, testKafkaURL)
+	})
+}
+
+func TestUninstallationKafkaExotic(t *testing.T) {
+	t.Run("uninstallation with exotic kafka config", func(t *testing.T) {
+		installer := NewExoticKafkaInstaller(testKafkaExoticBus, logging.NewArgoEventsLogger())
+		err := installer.Uninstall(context.TODO())
+		assert.NoError(t, err)
+	})
+}
diff --git a/controllers/eventbus/installer/nats.go b/controllers/eventbus/installer/nats.go
index 337c1bf0d7..c6ca852d32 100644
--- a/controllers/eventbus/installer/nats.go
+++ b/controllers/eventbus/installer/nats.go
@@ -2,10 +2,7 @@ package installer

 import (
 	"context"
-	"crypto/rand"
-	"errors"
 	"fmt"
-	"math/big"
 	"strconv"
 	"strings"
 	"time"
@@ -19,9 +16,11 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/client-go/kubernetes"
 	"sigs.k8s.io/controller-runtime/pkg/client"

 	"github.com/argoproj/argo-events/common"
+	"github.com/argoproj/argo-events/controllers"
 	controllerscommon "github.com/argoproj/argo-events/controllers/common"
 	"github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1"
 )
@@ -39,27 +38,30 @@ const (
 	serverAuthSecretKey = "auth"
 	// key of stan.conf in the configmap
 	configMapKey = "stan-config"
+
+	// default nats streaming version to be installed
+	defaultSTANVersion = "0.22.1"
 )

 // natsInstaller is used to create a NATS installation.
 type natsInstaller struct {
-	client         client.Client
-	eventBus       *v1alpha1.EventBus
-	streamingImage string
-	metricsImage   string
-	labels         map[string]string
-	logger         *zap.SugaredLogger
+	client     client.Client
+	kubeClient kubernetes.Interface
+	eventBus   *v1alpha1.EventBus
+	config     *controllers.GlobalConfig
+	labels     map[string]string
+	logger     *zap.SugaredLogger
 }

 // NewNATSInstaller returns a new NATS installer
-func NewNATSInstaller(client client.Client, eventBus *v1alpha1.EventBus, streamingImage, metricsImage string, labels map[string]string, logger *zap.SugaredLogger) Installer {
+func NewNATSInstaller(client client.Client, eventBus *v1alpha1.EventBus, config *controllers.GlobalConfig, labels map[string]string, kubeClient kubernetes.Interface, logger *zap.SugaredLogger) Installer {
 	return &natsInstaller{
-		client:         client,
-		eventBus:       eventBus,
-		streamingImage: streamingImage,
-		metricsImage:   metricsImage,
-		labels:         labels,
-		logger:         logger.Named("nats"),
+		client:     client,
+		kubeClient: kubeClient,
+		eventBus:   eventBus,
+		config:     config,
+		labels:     labels,
+		logger:     logger.Named("nats"),
 	}
 }

@@ -67,7 +69,7 @@ func NewNATSInstaller(client client.Client, eventBus *v1alpha1.EventBus, streami
 func (i *natsInstaller) Install(ctx context.Context) (*v1alpha1.BusConfig, error) {
 	natsObj := i.eventBus.Spec.NATS
 	if natsObj == nil || natsObj.Native == nil {
-		return nil, errors.New("invalid request")
+		return nil, fmt.Errorf("invalid request")
 	}

 	svc, err := i.createStanService(ctx)
@@ -93,7 +95,6 @@ func (i *natsInstaller) Install(ctx context.Context) (*v1alpha1.BusConfig, error
 		return nil, err
 	}
 	i.eventBus.Status.MarkDeployed("Succeeded", "NATS is deployed")
-	i.eventBus.Status.MarkConfigured()
 	clusterID := generateClusterID(i.eventBus)
 	busConfig := &v1alpha1.BusConfig{
 		NATS: &v1alpha1.NATSConfig{
@@ -124,16 +125,16 @@ func (i *natsInstaller) uninstallPVCs(ctx context.Context) error {
 	log := i.logger
 	pvcs, err := i.getPVCs(ctx, i.labels)
 	if err != nil {
-		log.Errorw("failed to get PVCs created by nats streaming statefulset when uninstalling", zap.Error(err))
+		log.Errorw("Failed to get PVCs created by nats streaming statefulset when uninstalling", zap.Error(err))
 		return err
 	}
 	for _, pvc := range pvcs {
 		err = i.client.Delete(ctx, &pvc)
 		if err != nil {
-			log.Errorw("failed to delete pvc when uninstalling", zap.Any("pvcName", pvc.Name), zap.Error(err))
+			log.Errorw("Failed to delete pvc when uninstalling", zap.Any("pvcName", pvc.Name), zap.Error(err))
 			return err
 		}
-		log.Infow("pvc deleted", "pvcName", pvc.Name)
+		log.Infow("Pvc deleted", "pvcName", pvc.Name)
 	}
 	return nil
 }
@@ -144,13 +145,13 @@ func (i *natsInstaller) createStanService(ctx context.Context) (*corev1.Service,
 	svc, err := i.getStanService(ctx)
 	if err != nil && !apierrors.IsNotFound(err) {
 		i.eventBus.Status.MarkDeployFailed("GetServiceFailed", "Get existing service failed")
-		log.Errorw("error getting existing service", zap.Error(err))
+		log.Errorw("Error getting existing service", zap.Error(err))
 		return nil, err
 	}
 	expectedSvc, err := i.buildStanService()
 	if err != nil {
 		i.eventBus.Status.MarkDeployFailed("BuildServiceFailed", "Failed to build a service spec")
-		log.Errorw("error building service spec", zap.Error(err))
+		log.Errorw("Error building service spec", zap.Error(err))
 		return nil, err
 	}
 	if svc != nil {
@@ -163,36 +164,36 @@ func (i *natsInstaller) createStanService(ctx context.Context) (*corev1.Service,
 			err = 
i.client.Update(ctx, svc) if err != nil { i.eventBus.Status.MarkDeployFailed("UpdateServiceFailed", "Failed to update existing service") - log.Errorw("error updating existing service", zap.Error(err)) + log.Errorw("Error updating existing service", zap.Error(err)) return nil, err } - log.Infow("service is updated", "serviceName", svc.Name) + log.Infow("Service is updated", "serviceName", svc.Name) } return svc, nil } err = i.client.Create(ctx, expectedSvc) if err != nil { i.eventBus.Status.MarkDeployFailed("CreateServiceFailed", "Failed to create a service") - log.Errorw("error creating a service", zap.Error(err)) + log.Errorw("Error creating a service", zap.Error(err)) return nil, err } - log.Infow("service is created", "serviceName", expectedSvc.Name) + log.Infow("Service is created", "serviceName", expectedSvc.Name) return expectedSvc, nil } -//Create a Configmap for NATS config +// Create a Configmap for NATS config func (i *natsInstaller) createConfigMap(ctx context.Context) (*corev1.ConfigMap, error) { log := i.logger cm, err := i.getConfigMap(ctx) if err != nil && !apierrors.IsNotFound(err) { i.eventBus.Status.MarkDeployFailed("GetConfigMapFailed", "Failed to get existing configmap") - log.Errorw("error getting existing configmap", zap.Error(err)) + log.Errorw("Error getting existing configmap", zap.Error(err)) return nil, err } expectedCm, err := i.buildConfigMap() if err != nil { i.eventBus.Status.MarkDeployFailed("BuildConfigMapFailed", "Failed to build a configmap spec") - log.Errorw("error building configmap spec", zap.Error(err)) + log.Errorw("Error building configmap spec", zap.Error(err)) return nil, err } if cm != nil { @@ -204,20 +205,20 @@ func (i *natsInstaller) createConfigMap(ctx context.Context) (*corev1.ConfigMap, err := i.client.Update(ctx, cm) if err != nil { i.eventBus.Status.MarkDeployFailed("UpdateConfigMapFailed", "Failed to update existing configmap") - log.Errorw("error updating configmap", zap.Error(err)) + log.Errorw("Error updating configmap", zap.Error(err)) return nil, err } - log.Infow("updated configmap", "configmapName", cm.Name) + log.Infow("Updated configmap", "configmapName", cm.Name) } return cm, nil } err = i.client.Create(ctx, expectedCm) if err != nil { i.eventBus.Status.MarkDeployFailed("CreateConfigMapFailed", "Failed to create configmap") - log.Errorw("error creating a configmap", zap.Error(err)) + log.Errorw("Error creating a configmap", zap.Error(err)) return nil, err } - log.Infow("created configmap", "configmapName", expectedCm.Name) + log.Infow("Created configmap", "configmapName", expectedCm.Name) return expectedCm, nil } @@ -227,13 +228,13 @@ func (i *natsInstaller) createAuthSecrets(ctx context.Context, strategy v1alpha1 sSecret, err := i.getServerAuthSecret(ctx) if err != nil && !apierrors.IsNotFound(err) { i.eventBus.Status.MarkDeployFailed("GetServerAuthSecretFailed", "Failed to get existing server auth secret") - log.Errorw("error getting existing server auth secret", zap.Error(err)) + log.Errorw("Error getting existing server auth secret", zap.Error(err)) return nil, nil, err } cSecret, err := i.getClientAuthSecret(ctx) if err != nil && !apierrors.IsNotFound(err) { i.eventBus.Status.MarkDeployFailed("GetClientAuthSecretFailed", "Failed to get existing client auth secret") - log.Errorw("error getting existing client auth secret", zap.Error(err)) + log.Errorw("Error getting existing client auth secret", zap.Error(err)) return nil, nil, err } if strategy != v1alpha1.AuthStrategyNone { // Do not checkout AuthStrategyNone because it 
only has server auth secret @@ -252,10 +253,10 @@ func (i *natsInstaller) createAuthSecrets(ctx context.Context, strategy v1alpha1 err = i.client.Delete(ctx, cSecret) if err != nil { i.eventBus.Status.MarkDeployFailed("DeleteClientAuthSecretFailed", "Failed to delete the client auth secret") - log.Errorw("error deleting client auth secret", zap.Error(err)) + log.Errorw("Error deleting client auth secret", zap.Error(err)) return nil, nil, err } - log.Info("deleted server auth secret") + log.Info("Deleted server auth secret") } if sSecret != nil && sSecret.Annotations != nil && sSecret.Annotations[authStrategyAnnoKey] == string(strategy) && len(sSecret.Data[serverAuthSecretKey]) == 0 { // If the server auth secret is already existing, strategy didn't change, and the secret is empty string, reuse it without updating. @@ -265,7 +266,7 @@ func (i *natsInstaller) createAuthSecrets(ctx context.Context, strategy v1alpha1 expectedSSecret, err := i.buildServerAuthSecret(strategy, "") if err != nil { i.eventBus.Status.MarkDeployFailed("BuildServerAuthSecretFailed", "Failed to build a server auth secret spec") - log.Errorw("error building server auth secret spec", zap.Error(err)) + log.Errorw("Error building server auth secret spec", zap.Error(err)) return nil, nil, err } if sSecret != nil { @@ -275,27 +276,22 @@ func (i *natsInstaller) createAuthSecrets(ctx context.Context, strategy v1alpha1 err = i.client.Update(ctx, sSecret) if err != nil { i.eventBus.Status.MarkDeployFailed("UpdateServerAuthSecretFailed", "Failed to update the server auth secret") - log.Errorw("error updating server auth secret", zap.Error(err)) + log.Errorw("Error updating server auth secret", zap.Error(err)) return nil, nil, err } - log.Infow("updated server auth secret", "serverAuthSecretName", sSecret.Name) + log.Infow("Updated server auth secret", "serverAuthSecretName", sSecret.Name) return sSecret, nil, nil } err = i.client.Create(ctx, expectedSSecret) if err != nil { i.eventBus.Status.MarkDeployFailed("CreateServerAuthSecretFailed", "Failed to create a server auth secret") - log.Errorw("error creating server auth secret", zap.Error(err)) + log.Errorw("Error creating server auth secret", zap.Error(err)) return nil, nil, err } - log.Infow("created server auth secret", "serverAuthSecretName", expectedSSecret.Name) + log.Infow("Created server auth secret", "serverAuthSecretName", expectedSSecret.Name) return expectedSSecret, nil, nil case v1alpha1.AuthStrategyToken: - token, err := generateToken(64) - if err != nil { - i.eventBus.Status.MarkDeployFailed("BuildServerAuthSecretFailed", "Failed to generate auth token") - log.Errorw("error generating auth token", zap.Error(err)) - return nil, nil, err - } + token := common.RandomString(64) serverAuthText := fmt.Sprintf(`authorization { token: "%s" }`, token) @@ -304,7 +300,7 @@ func (i *natsInstaller) createAuthSecrets(ctx context.Context, strategy v1alpha1 expectedSSecret, err := i.buildServerAuthSecret(strategy, serverAuthText) if err != nil { i.eventBus.Status.MarkDeployFailed("BuildServerAuthSecretFailed", "Failed to build a server auth secret spec") - log.Errorw("error building server auth secret spec", zap.Error(err)) + log.Errorw("Error building server auth secret spec", zap.Error(err)) return nil, nil, err } returnedSSecret := expectedSSecret @@ -312,10 +308,10 @@ func (i *natsInstaller) createAuthSecrets(ctx context.Context, strategy v1alpha1 err = i.client.Create(ctx, expectedSSecret) if err != nil { i.eventBus.Status.MarkDeployFailed("CreateServerAuthSecretFailed", 
"Failed to create a server auth secret") - log.Errorw("error creating server auth secret", zap.Error(err)) + log.Errorw("Error creating server auth secret", zap.Error(err)) return nil, nil, err } - log.Infow("created server auth secret", "serverAuthSecretName", expectedSSecret.Name) + log.Infow("Created server auth secret", "serverAuthSecretName", expectedSSecret.Name) } else { sSecret.Data = expectedSSecret.Data sSecret.SetLabels(expectedSSecret.Labels) @@ -323,17 +319,17 @@ func (i *natsInstaller) createAuthSecrets(ctx context.Context, strategy v1alpha1 err = i.client.Update(ctx, sSecret) if err != nil { i.eventBus.Status.MarkDeployFailed("UpdateServerAuthSecretFailed", "Failed to update the server auth secret") - log.Errorw("error updating server auth secret", zap.Error(err)) + log.Errorw("Error updating server auth secret", zap.Error(err)) return nil, nil, err } - log.Infow("updated server auth secret", "serverAuthSecretName", sSecret.Name) + log.Infow("Updated server auth secret", "serverAuthSecretName", sSecret.Name) returnedSSecret = sSecret } // create client auth secret expectedCSecret, err := i.buildClientAuthSecret(strategy, clientAuthText) if err != nil { i.eventBus.Status.MarkDeployFailed("BuildClientAuthSecretFailed", "Failed to build a client auth secret spec") - log.Errorw("error building client auth secret spec", zap.Error(err)) + log.Errorw("Error building client auth secret spec", zap.Error(err)) return nil, nil, err } returnedCSecret := expectedCSecret @@ -341,10 +337,10 @@ func (i *natsInstaller) createAuthSecrets(ctx context.Context, strategy v1alpha1 err = i.client.Create(ctx, expectedCSecret) if err != nil { i.eventBus.Status.MarkDeployFailed("CreateClientAuthSecretFailed", "Failed to create a client auth secret") - log.Errorw("error creating client auth secret", zap.Error(err)) + log.Errorw("Error creating client auth secret", zap.Error(err)) return nil, nil, err } - log.Infow("created client auth secret", "clientAuthSecretName", expectedCSecret.Name) + log.Infow("Created client auth secret", "clientAuthSecretName", expectedCSecret.Name) } else { cSecret.Data = expectedCSecret.Data cSecret.SetLabels(expectedCSecret.Labels) @@ -352,16 +348,16 @@ func (i *natsInstaller) createAuthSecrets(ctx context.Context, strategy v1alpha1 err = i.client.Update(ctx, cSecret) if err != nil { i.eventBus.Status.MarkDeployFailed("UpdateClientAuthSecretFailed", "Failed to update the client auth secret") - log.Errorw("error updating client auth secret", zap.Error(err)) + log.Errorw("Error updating client auth secret", zap.Error(err)) return nil, nil, err } - log.Infow("updated client auth secret", "clientAuthSecretName", cSecret.Name) + log.Infow("Updated client auth secret", "clientAuthSecretName", cSecret.Name) returnedCSecret = cSecret } return returnedSSecret, returnedCSecret, nil default: i.eventBus.Status.MarkDeployFailed("UnsupportedAuthStrategy", "Unsupported auth strategy") - return nil, nil, errors.New("unsupported auth strategy") + return nil, nil, fmt.Errorf("unsupported auth strategy") } } @@ -371,35 +367,36 @@ func (i *natsInstaller) createStatefulSet(ctx context.Context, serviceName, conf ss, err := i.getStatefulSet(ctx) if err != nil && !apierrors.IsNotFound(err) { i.eventBus.Status.MarkDeployFailed("GetStatefulSetFailed", "Failed to get existing statefulset") - log.Errorw("error getting existing statefulset", zap.Error(err)) + log.Errorw("Error getting existing statefulset", zap.Error(err)) return err } expectedSs, err := i.buildStatefulSet(serviceName, configmapName, 
authSecretName) if err != nil { i.eventBus.Status.MarkDeployFailed("BuildStatefulSetFailed", "Failed to build a statefulset spec") - log.Errorw("error building statefulset spec", zap.Error(err)) + log.Errorw("Error building statefulset spec", zap.Error(err)) return err } if ss != nil { if ss.Annotations != nil && ss.Annotations[common.AnnotationResourceSpecHash] == expectedSs.Annotations[common.AnnotationResourceSpecHash] { return nil } - // Delete the existing one to recreate it - err := i.client.Delete(ctx, ss) - if err != nil { - i.eventBus.Status.MarkDeployFailed("DeleteOldStatefulSetFailed", "Failed to delete a statefulset") - log.Errorw("error deleting a statefulset", zap.Error(err)) + ss.SetLabels(expectedSs.Labels) + ss.Annotations[common.AnnotationResourceSpecHash] = expectedSs.Annotations[common.AnnotationResourceSpecHash] + ss.Spec = expectedSs.Spec + if err := i.client.Update(ctx, ss); err != nil { + i.eventBus.Status.MarkDeployFailed("UpdateStatefulSetFailed", "Failed to update a statefulset") + log.Errorw("Error updating a statefulset", zap.Error(err)) return err } - log.Infow("old statefulset is deleted", "statefulsetName", ss.Name) + log.Infow("Statefulset is updated", "statefulsetName", ss.Name) + return nil } - err = i.client.Create(ctx, expectedSs) - if err != nil { + if err := i.client.Create(ctx, expectedSs); err != nil { i.eventBus.Status.MarkDeployFailed("CreateStatefulSetFailed", "Failed to create a statefulset") - log.Errorw("error creating a statefulset", zap.Error(err)) + log.Errorw("Error creating a statefulset", zap.Error(err)) return err } - log.Infow("statefulset is created", "statefulsetName", expectedSs.Name) + log.Infow("Statefulset is created", "statefulsetName", expectedSs.Name) return nil } @@ -414,7 +411,11 @@ func (i *natsInstaller) buildStanService() (*corev1.Service, error) { Spec: corev1.ServiceSpec{ ClusterIP: corev1.ClusterIPNone, Ports: []corev1.ServicePort{ - {Name: "client", Port: clientPort}, + // Prefix tcp- to enable clients to connect from + // an istio-enabled namespace, following: + // https://github.com/nats-io/nats-operator/issues/88 + // https://github.com/istio/istio/issues/28623 + {Name: "tcp-client", Port: clientPort}, {Name: "cluster", Port: clusterPort}, {Name: "monitor", Port: monitorPort}, }, @@ -437,7 +438,7 @@ func (i *natsInstaller) buildConfigMap() (*corev1.ConfigMap, error) { if replicas < 3 { replicas = 3 } - maxAge := common.NATSStreamingMaxAge + maxAge := common.STANMaxAge if i.eventBus.Spec.NATS.Native.MaxAge != nil { maxAge = *i.eventBus.Spec.NATS.Native.MaxAge } @@ -445,14 +446,54 @@ func (i *natsInstaller) buildConfigMap() (*corev1.ConfigMap, error) { if err != nil { return nil, err } - maxMsgs := uint64(1000000) + maxMsgs := common.STANMaxMsgs if i.eventBus.Spec.NATS.Native.MaxMsgs != nil { maxMsgs = *i.eventBus.Spec.NATS.Native.MaxMsgs } - maxBytes := "1GB" + maxSubs := common.STANMaxSubs + if i.eventBus.Spec.NATS.Native.MaxSubs != nil { + maxSubs = *i.eventBus.Spec.NATS.Native.MaxSubs + } + maxBytes := common.STANMaxBytes if i.eventBus.Spec.NATS.Native.MaxBytes != nil { maxBytes = *i.eventBus.Spec.NATS.Native.MaxBytes } + maxPayload := common.STANMaxPayload + if i.eventBus.Spec.NATS.Native.MaxPayload != nil { + maxPayload = *i.eventBus.Spec.NATS.Native.MaxPayload + } + raftHeartbeatTimeout := common.STANRaftHeartbeatTimeout + if i.eventBus.Spec.NATS.Native.RaftHeartbeatTimeout != nil { + raftHeartbeatTimeout = *i.eventBus.Spec.NATS.Native.RaftHeartbeatTimeout + } + _, err = 
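+	// The ParseDuration calls below are validation only: the parsed values are
+	// discarded and the original strings are rendered into stan.conf as-is. Go
+	// duration syntax applies, e.g. "500ms" and "1.5s" parse, "2seconds" fails.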
time.ParseDuration(raftHeartbeatTimeout) + if err != nil { + return nil, err + } + raftElectionTimeout := common.STANRaftElectionTimeout + if i.eventBus.Spec.NATS.Native.RaftElectionTimeout != nil { + raftElectionTimeout = *i.eventBus.Spec.NATS.Native.RaftElectionTimeout + } + _, err = time.ParseDuration(raftElectionTimeout) + if err != nil { + return nil, err + } + raftLeaseTimeout := common.STANRaftLeaseTimeout + if i.eventBus.Spec.NATS.Native.RaftLeaseTimeout != nil { + raftLeaseTimeout = *i.eventBus.Spec.NATS.Native.RaftLeaseTimeout + } + _, err = time.ParseDuration(raftLeaseTimeout) + if err != nil { + return nil, err + } + raftCommitTimeout := common.STANRaftCommitTimeout + if i.eventBus.Spec.NATS.Native.RaftCommitTimeout != nil { + raftCommitTimeout = *i.eventBus.Spec.NATS.Native.RaftCommitTimeout + } + _, err = time.ParseDuration(raftCommitTimeout) + if err != nil { + return nil, err + } peers := []string{} routes := []string{} for j := 0; j < replicas; j++ { @@ -467,6 +508,7 @@ cluster { cluster_advertise: $CLUSTER_ADVERTISE connect_retries: 10 } +max_payload: %s streaming { id: %s store: file @@ -475,13 +517,18 @@ streaming { node_id: $POD_NAME peers: [%s] log_path: /data/stan/logs + raft_heartbeat_timeout: "%s" + raft_election_timeout: "%s" + raft_lease_timeout: "%s" + raft_commit_timeout: "%s" } store_limits { max_age: %s max_msgs: %v max_bytes: %s + max_subs: %v } -}`, strconv.Itoa(int(monitorPort)), strconv.Itoa(int(clusterPort)), strings.Join(routes, ","), clusterID, strings.Join(peers, ","), maxAge, maxMsgs, maxBytes) +}`, strconv.Itoa(int(monitorPort)), strconv.Itoa(int(clusterPort)), strings.Join(routes, ","), maxPayload, clusterID, strings.Join(peers, ","), raftHeartbeatTimeout, raftElectionTimeout, raftLeaseTimeout, raftCommitTimeout, maxAge, maxMsgs, maxBytes, maxSubs) cm := &corev1.ConfigMap{ ObjectMeta: metav1.ObjectMeta{ Namespace: i.eventBus.Namespace, @@ -503,9 +550,9 @@ streaming { // Parameter - secret // Example: // -// authorization { -// token: "abcd1234" -// } +// authorization { +// token: "abcd1234" +// } func (i *natsInstaller) buildServerAuthSecret(authStrategy v1alpha1.AuthStrategy, secret string) (*corev1.Secret, error) { s := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ @@ -568,11 +615,17 @@ func (i *natsInstaller) buildStatefulSet(serviceName, configmapName, authSecretN } func (i *natsInstaller) buildStatefulSetSpec(serviceName, configmapName, authSecretName string) (*appv1.StatefulSetSpec, error) { + stanVersion, err := i.config.GetSTANVersion(defaultSTANVersion) + if err != nil { + return nil, fmt.Errorf("failed to get nats streaming version, err: %w", err) + } // Streaming requires minimal size 3. 
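+	// Three is the practical minimum for clustered NATS Streaming: the cluster
+	// runs Raft, which needs a majority of nodes up, so 3 replicas tolerate the
+	// loss of one node while 2 replicas tolerate none.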
replicas := i.eventBus.Spec.NATS.Native.Replicas if replicas < 3 { replicas = 3 } + var stanContainerPullPolicy, metricsContainerPullPolicy corev1.PullPolicy + var stanContainerSecurityContext, metricsContainerSecurityContext *corev1.SecurityContext stanContainerResources := corev1.ResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceCPU: apiresource.MustParse("0"), @@ -581,11 +634,17 @@ func (i *natsInstaller) buildStatefulSetSpec(serviceName, configmapName, authSec containerTmpl := i.eventBus.Spec.NATS.Native.ContainerTemplate if containerTmpl != nil { stanContainerResources = containerTmpl.Resources + stanContainerPullPolicy = containerTmpl.ImagePullPolicy + stanContainerSecurityContext = containerTmpl.SecurityContext } metricsContainerResources := corev1.ResourceRequirements{} - if i.eventBus.Spec.NATS.Native.MetricsContainerTemplate != nil { - metricsContainerResources = i.eventBus.Spec.NATS.Native.MetricsContainerTemplate.Resources + metricsContainerTmpl := i.eventBus.Spec.NATS.Native.MetricsContainerTemplate + if metricsContainerTmpl != nil { + metricsContainerResources = metricsContainerTmpl.Resources + metricsContainerPullPolicy = metricsContainerTmpl.ImagePullPolicy + metricsContainerSecurityContext = metricsContainerTmpl.SecurityContext } + podTemplateLabels := make(map[string]string) if i.eventBus.Spec.NATS.Native.Metadata != nil && len(i.eventBus.Spec.NATS.Native.Metadata.Labels) > 0 { @@ -654,8 +713,9 @@ func (i *natsInstaller) buildStatefulSetSpec(serviceName, configmapName, authSec }, Containers: []corev1.Container{ { - Name: "stan", - Image: i.streamingImage, + Name: "stan", + Image: stanVersion.NATSStreamingImage, + ImagePullPolicy: stanContainerPullPolicy, Ports: []corev1.ContainerPort{ {Name: "client", ContainerPort: clientPort}, {Name: "cluster", ContainerPort: clusterPort}, @@ -670,9 +730,10 @@ func (i *natsInstaller) buildStatefulSetSpec(serviceName, configmapName, authSec VolumeMounts: []corev1.VolumeMount{ {Name: "config-volume", MountPath: "/etc/stan-config"}, }, - Resources: stanContainerResources, + Resources: stanContainerResources, + SecurityContext: stanContainerSecurityContext, LivenessProbe: &corev1.Probe{ - Handler: corev1.Handler{ + ProbeHandler: corev1.ProbeHandler{ HTTPGet: &corev1.HTTPGetAction{ Path: "/", Port: intstr.FromInt(int(monitorPort)), @@ -683,13 +744,15 @@ func (i *natsInstaller) buildStatefulSetSpec(serviceName, configmapName, authSec }, }, { - Name: "metrics", - Image: i.metricsImage, + Name: "metrics", + Image: stanVersion.MetricsExporterImage, + ImagePullPolicy: metricsContainerPullPolicy, Ports: []corev1.ContainerPort{ {Name: "metrics", ContainerPort: common.EventBusMetricsPort}, }, - Args: []string{"-connz", "-routez", "-subz", "-varz", "-channelz", "-serverz", fmt.Sprintf("http://localhost:%s", strconv.Itoa(int(monitorPort)))}, - Resources: metricsContainerResources, + Args: []string{"-connz", "-routez", "-subz", "-varz", "-channelz", "-serverz", fmt.Sprintf("http://localhost:%s", strconv.Itoa(int(monitorPort)))}, + Resources: metricsContainerResources, + SecurityContext: metricsContainerSecurityContext, }, }, }, @@ -722,7 +785,7 @@ func (i *natsInstaller) buildStatefulSetSpec(serviceName, configmapName, authSec }, VolumeMode: &volMode, StorageClassName: i.eventBus.Spec.NATS.Native.Persistence.StorageClassName, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceStorage: volSize, }, @@ -744,20 +807,6 @@ func (i *natsInstaller) 
buildStatefulSetSpec(serviceName, configmapName, authSec volumeMounts = append(volumeMounts, corev1.VolumeMount{Name: emptyDirVolName, MountPath: "/data/stan"}) spec.Template.Spec.Containers[0].VolumeMounts = volumeMounts } - if spec.Template.Spec.Affinity == nil && i.eventBus.Spec.NATS.Native.DeprecatedAntiAffinity { - spec.Template.Spec.Affinity = &corev1.Affinity{ - PodAntiAffinity: &corev1.PodAntiAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{ - { - TopologyKey: "kubernetes.io/hostname", - LabelSelector: &metav1.LabelSelector{ - MatchLabels: i.labels, - }, - }, - }, - }, - } - } return &spec, nil } @@ -812,11 +861,7 @@ func (i *natsInstaller) getClientAuthSecret(ctx context.Context) (*corev1.Secret } func (i *natsInstaller) getSecret(ctx context.Context, labels map[string]string) (*corev1.Secret, error) { - sl := &corev1.SecretList{} - err := i.client.List(ctx, sl, &client.ListOptions{ - Namespace: i.eventBus.Namespace, - LabelSelector: labelSelector(labels), - }) + sl, err := i.kubeClient.CoreV1().Secrets(i.eventBus.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector(labels).String()}) if err != nil { return nil, err } @@ -874,20 +919,6 @@ func (i *natsInstaller) mergeEventBusLabels(given map[string]string) map[string] return result } -// generate a random string as token with given length -func generateToken(length int) (string, error) { - seeds := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" - result := make([]byte, length) - for i := 0; i < length; i++ { - num, err := rand.Int(rand.Reader, big.NewInt(int64(len(seeds)))) - if err != nil { - return "", err - } - result[i] = seeds[num.Int64()] - } - return string(result), nil -} - func serverAuthSecretLabels(given map[string]string) map[string]string { result := map[string]string{"server-auth-secret": "yes"} for k, v := range given { diff --git a/controllers/eventbus/installer/nats_test.go b/controllers/eventbus/installer/nats_test.go index 591c29d491..03cf82fe9a 100644 --- a/controllers/eventbus/installer/nats_test.go +++ b/controllers/eventbus/installer/nats_test.go @@ -8,9 +8,11 @@ import ( "testing" "github.com/stretchr/testify/assert" + "go.uber.org/zap/zaptest" appv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8sfake "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -20,16 +22,14 @@ import ( ) const ( - testNamespace = "test-ns" - testName = "test-name" - testStreamingImage = "test-s-image" - testMetricsImage = "test-m-image" + testNamespace = "test-ns" + testName = "test-name" ) var ( testLabels = map[string]string{"controller": "test-controller"} - testEventBus = &v1alpha1.EventBus{ + testNatsEventBus = &v1alpha1.EventBus{ TypeMeta: metav1.TypeMeta{ APIVersion: v1alpha1.SchemeGroupVersion.String(), Kind: "EventBus", @@ -111,12 +111,11 @@ func init() { func TestBadInstallation(t *testing.T) { t.Run("bad installation", func(t *testing.T) { installer := &natsInstaller{ - client: fake.NewClientBuilder().Build(), - eventBus: testEventBusBad, - streamingImage: testStreamingImage, - metricsImage: testMetricsImage, - labels: testLabels, - logger: logging.NewArgoEventsLogger(), + client: fake.NewClientBuilder().Build(), + eventBus: testEventBusBad, + config: fakeConfig, + labels: testLabels, + logger: logging.NewArgoEventsLogger(), } _, err := installer.Install(context.TODO()) assert.Error(t, 
err) @@ -124,9 +123,10 @@ func TestBadInstallation(t *testing.T) { } func TestInstallationAuthtoken(t *testing.T) { + kubeClient := k8sfake.NewSimpleClientset() t.Run("auth token installation", func(t *testing.T) { cl := fake.NewClientBuilder().Build() - installer := NewNATSInstaller(cl, testEventBus, testStreamingImage, testMetricsImage, testLabels, logging.NewArgoEventsLogger()) + installer := NewNATSInstaller(cl, testNatsEventBus, fakeConfig, testLabels, kubeClient, zaptest.NewLogger(t).Sugar()) busconf, err := installer.Install(context.TODO()) assert.NoError(t, err) assert.NotNil(t, busconf.NATS) @@ -173,9 +173,10 @@ func TestInstallationAuthtoken(t *testing.T) { } func TestInstallationAuthNone(t *testing.T) { + kubeClient := k8sfake.NewSimpleClientset() t.Run("auth none installation", func(t *testing.T) { cl := fake.NewClientBuilder().Build() - installer := NewNATSInstaller(cl, testEventBusAuthNone, testStreamingImage, testMetricsImage, testLabels, logging.NewArgoEventsLogger()) + installer := NewNATSInstaller(cl, testEventBusAuthNone, fakeConfig, testLabels, kubeClient, zaptest.NewLogger(t).Sugar()) busconf, err := installer.Install(context.TODO()) assert.NoError(t, err) assert.NotNil(t, busconf.NATS) @@ -212,12 +213,11 @@ func TestBuildPersistStatefulSetSpec(t *testing.T) { t.Run("installation with persistence", func(t *testing.T) { cl := fake.NewClientBuilder().Build() installer := &natsInstaller{ - client: cl, - eventBus: testEventBusPersist, - streamingImage: testStreamingImage, - metricsImage: testMetricsImage, - labels: testLabels, - logger: logging.NewArgoEventsLogger(), + client: cl, + eventBus: testEventBusPersist, + config: fakeConfig, + labels: testLabels, + logger: logging.NewArgoEventsLogger(), } ss, err := installer.buildStatefulSet("svcName", "cmName", "secretName") assert.NoError(t, err) @@ -227,12 +227,11 @@ func TestBuildPersistStatefulSetSpec(t *testing.T) { t.Run("installation with image pull secrets", func(t *testing.T) { cl := fake.NewClientBuilder().Build() installer := &natsInstaller{ - client: cl, - eventBus: testEventBus, - streamingImage: testStreamingImage, - metricsImage: testMetricsImage, - labels: testLabels, - logger: logging.NewArgoEventsLogger(), + client: cl, + eventBus: testNatsEventBus, + config: fakeConfig, + labels: testLabels, + logger: logging.NewArgoEventsLogger(), } ss, err := installer.buildStatefulSet("svcName", "cmName", "secretName") assert.NoError(t, err) @@ -241,15 +240,14 @@ func TestBuildPersistStatefulSetSpec(t *testing.T) { t.Run("installation with priority class", func(t *testing.T) { cl := fake.NewClientBuilder().Build() - eb := testEventBus.DeepCopy() + eb := testNatsEventBus.DeepCopy() eb.Spec.NATS.Native.PriorityClassName = "test-class" installer := &natsInstaller{ - client: cl, - eventBus: eb, - streamingImage: testStreamingImage, - metricsImage: testMetricsImage, - labels: testLabels, - logger: logging.NewArgoEventsLogger(), + client: cl, + eventBus: eb, + config: fakeConfig, + labels: testLabels, + logger: logging.NewArgoEventsLogger(), } ss, err := installer.buildStatefulSet("svcName", "cmName", "secretName") assert.NoError(t, err) @@ -261,12 +259,11 @@ func TestBuildServiceAccountStatefulSetSpec(t *testing.T) { t.Run("installation with Service Account Name", func(t *testing.T) { cl := fake.NewClientBuilder().Build() installer := &natsInstaller{ - client: cl, - eventBus: testEventBus, - streamingImage: testStreamingImage, - metricsImage: testMetricsImage, - labels: testLabels, - logger: logging.NewArgoEventsLogger(), + 
client: cl, + eventBus: testNatsEventBus, + config: fakeConfig, + labels: testLabels, + logger: logging.NewArgoEventsLogger(), } ss, err := installer.buildStatefulSet("svcName", "cmName", "secretName") assert.NoError(t, err) @@ -278,12 +275,11 @@ func TestBuildConfigMap(t *testing.T) { t.Run("test build config map", func(t *testing.T) { cl := fake.NewClientBuilder().Build() installer := &natsInstaller{ - client: cl, - eventBus: testEventBus, - streamingImage: testStreamingImage, - metricsImage: testMetricsImage, - labels: testLabels, - logger: logging.NewArgoEventsLogger(), + client: cl, + eventBus: testNatsEventBus, + config: fakeConfig, + labels: testLabels, + logger: logging.NewArgoEventsLogger(), } cm, err := installer.buildConfigMap() assert.NoError(t, err) @@ -291,8 +287,8 @@ func TestBuildConfigMap(t *testing.T) { conf, ok := cm.Data[configMapKey] assert.True(t, ok) assert.True(t, strings.Contains(conf, "routes:")) - svcName := generateServiceName(testEventBus) - ssName := generateStatefulSetName(testEventBus) + svcName := generateServiceName(testNatsEventBus) + ssName := generateStatefulSetName(testNatsEventBus) r := fmt.Sprintf("nats://%s-%s.%s.%s.svc:%s", ssName, "0", svcName, testNamespace, strconv.Itoa(int(clusterPort))) lines := strings.Split(conf, `\n`) for _, l := range lines { @@ -303,10 +299,3 @@ func TestBuildConfigMap(t *testing.T) { } }) } - -func TestGenerateToken(t *testing.T) { - n := 30 - token, err := generateToken(n) - assert.NoError(t, err) - assert.Equal(t, len(token), n) -} diff --git a/controllers/eventbus/validate.go b/controllers/eventbus/validate.go index 36db58d4df..17bdd39408 100644 --- a/controllers/eventbus/validate.go +++ b/controllers/eventbus/validate.go @@ -1,26 +1,50 @@ package eventbus import ( - "github.com/pkg/errors" + "fmt" "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" ) // ValidateEventBus accepts an EventBus and performs validation against it func ValidateEventBus(eb *v1alpha1.EventBus) error { - if eb.Spec.NATS != nil { - if eb.Spec.NATS.Native != nil && eb.Spec.NATS.Exotic != nil { - return errors.New("\"spec.nats.native\" and \"spec.nats.exotic\" can not be defined together") + if eb.Spec.NATS == nil && eb.Spec.JetStream == nil && eb.Spec.Kafka == nil && eb.Spec.JetStreamExotic == nil { + return fmt.Errorf("invalid spec: either \"nats\", \"jetstream\", \"jetstreamExotic\", or \"kafka\" needs to be specified") + } + if x := eb.Spec.NATS; x != nil { + if x.Native != nil && x.Exotic != nil { + return fmt.Errorf("\"spec.nats.native\" and \"spec.nats.exotic\" can not be defined together") + } + if x.Native == nil && x.Exotic == nil { + return fmt.Errorf("either \"native\" or \"exotic\" must be defined") } - if eb.Spec.NATS.Exotic != nil { - e := eb.Spec.NATS.Exotic + if x.Exotic != nil { + e := x.Exotic if e.ClusterID == nil { - return errors.New("\"spec.nats.exotic.clusterID\" is missing") + return fmt.Errorf("\"spec.nats.exotic.clusterID\" is missing") } if e.URL == "" { - return errors.New("\"spec.nats.exotic.url\" is missing") + return fmt.Errorf("\"spec.nats.exotic.url\" is missing") } } } + if x := eb.Spec.JetStream; x != nil { + if x.Version == "" { + return fmt.Errorf("invalid spec: a version for jetstream needs to be specified") + } + if x.Replicas != nil && (*x.Replicas == 2 || *x.Replicas <= 0) { + return fmt.Errorf("invalid spec: a jetstream eventbus requires 1 replica or >= 3 replicas") + } + } + if x := eb.Spec.Kafka; x != nil { + if x.URL == "" { + return fmt.Errorf("\"spec.kafka.url\" is missing") + } + } + if x 
:= eb.Spec.JetStreamExotic; x != nil { + if x.URL == "" { + return fmt.Errorf("\"spec.jetstreamExotic.url\" is missing") + } + } return nil } diff --git a/controllers/eventbus/validate_test.go b/controllers/eventbus/validate_test.go index e9799a4dad..1e952a35a0 100644 --- a/controllers/eventbus/validate_test.go +++ b/controllers/eventbus/validate_test.go @@ -6,13 +6,14 @@ import ( "github.com/stretchr/testify/assert" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" "github.com/argoproj/argo-events/common" "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" ) var ( - testEventBus = &v1alpha1.EventBus{ + testNatsEventBus = &v1alpha1.EventBus{ ObjectMeta: metav1.ObjectMeta{ Namespace: "test-ns", Name: common.DefaultEventBusName, @@ -25,24 +26,83 @@ var ( }, }, } + + testJetStreamEventBus = &v1alpha1.EventBus{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-ns", + Name: common.DefaultEventBusName, + }, + Spec: v1alpha1.EventBusSpec{ + JetStream: &v1alpha1.JetStreamBus{ + Version: "2.7.3", + }, + }, + } + + testJetStreamExoticBus = &v1alpha1.EventBus{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-ns", + Name: common.DefaultEventBusName, + }, + Spec: v1alpha1.EventBusSpec{ + JetStreamExotic: &v1alpha1.JetStreamConfig{ + URL: "nats://nats:4222", + }, + }, + } + + testKafkaEventBus = &v1alpha1.EventBus{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-ns", + Name: common.DefaultEventBusName, + }, + Spec: v1alpha1.EventBusSpec{ + Kafka: &v1alpha1.KafkaBus{ + URL: "127.0.0.1:9092", + }, + }, + } ) func TestValidate(t *testing.T) { - t.Run("test good eventbus", func(t *testing.T) { - err := ValidateEventBus(testEventBus) + t.Run("test good nats eventbus", func(t *testing.T) { + err := ValidateEventBus(testNatsEventBus) assert.NoError(t, err) }) - t.Run("test native exotic conflicting eventbus", func(t *testing.T) { - eb := testEventBus.DeepCopy() + t.Run("test good js eventbus", func(t *testing.T) { + err := ValidateEventBus(testJetStreamEventBus) + assert.NoError(t, err) + }) + + t.Run("test good kafka eventbus", func(t *testing.T) { + err := ValidateEventBus(testKafkaEventBus) + assert.NoError(t, err) + }) + + t.Run("test good js exotic eventbus", func(t *testing.T) { + err := ValidateEventBus(testJetStreamExoticBus) + assert.NoError(t, err) + }) + + t.Run("test bad eventbus", func(t *testing.T) { + eb := testNatsEventBus.DeepCopy() + eb.Spec.NATS = nil + err := ValidateEventBus(eb) + assert.Error(t, err) + assert.Contains(t, err.Error(), "invalid spec: either") + }) + + t.Run("test native nats exotic conflicting eventbus", func(t *testing.T) { + eb := testNatsEventBus.DeepCopy() eb.Spec.NATS.Exotic = &v1alpha1.NATSConfig{} err := ValidateEventBus(eb) assert.Error(t, err) assert.True(t, strings.Contains(err.Error(), "can not be defined together")) }) - t.Run("test exotic eventbus no clusterID", func(t *testing.T) { - eb := testEventBus.DeepCopy() + t.Run("test exotic nats eventbus no clusterID", func(t *testing.T) { + eb := testNatsEventBus.DeepCopy() eb.Spec.NATS.Native = nil eb.Spec.NATS.Exotic = &v1alpha1.NATSConfig{} err := ValidateEventBus(eb) @@ -50,8 +110,8 @@ func TestValidate(t *testing.T) { assert.True(t, strings.Contains(err.Error(), "\"spec.nats.exotic.clusterID\" is missing")) }) - t.Run("test exotic eventbus empty URL", func(t *testing.T) { - eb := testEventBus.DeepCopy() + t.Run("test exotic nats eventbus empty URL", func(t *testing.T) { + eb := testNatsEventBus.DeepCopy() eb.Spec.NATS.Native = nil cID := "test-cluster-id" eb.Spec.NATS.Exotic = 
&v1alpha1.NATSConfig{ @@ -61,4 +121,38 @@ func TestValidate(t *testing.T) { assert.Error(t, err) assert.True(t, strings.Contains(err.Error(), "\"spec.nats.exotic.url\" is missing")) }) + + t.Run("test js eventbus no version", func(t *testing.T) { + eb := testJetStreamEventBus.DeepCopy() + eb.Spec.JetStream.Version = "" + err := ValidateEventBus(eb) + assert.Error(t, err) + assert.Contains(t, err.Error(), "invalid spec: a version") + }) + + t.Run("test js eventbus replica", func(t *testing.T) { + eb := testJetStreamEventBus.DeepCopy() + eb.Spec.JetStream.Replicas = ptr.To[int32](3) + err := ValidateEventBus(eb) + assert.NoError(t, err) + eb.Spec.JetStream.Replicas = nil + err = ValidateEventBus(eb) + assert.NoError(t, err) + }) + + t.Run("test kafka eventbus no URL", func(t *testing.T) { + eb := testKafkaEventBus.DeepCopy() + eb.Spec.Kafka.URL = "" + err := ValidateEventBus(eb) + assert.Error(t, err) + assert.Contains(t, err.Error(), "\"spec.kafka.url\" is missing") + }) + + t.Run("test exotic js eventbus empty URL", func(t *testing.T) { + eb := testJetStreamExoticBus.DeepCopy() + eb.Spec.JetStreamExotic.URL = "" + err := ValidateEventBus(eb) + assert.Error(t, err) + assert.True(t, strings.Contains(err.Error(), "\"spec.jetstreamExotic.url\" is missing")) + }) } diff --git a/controllers/eventsource/cmd/start.go b/controllers/eventsource/cmd/start.go deleted file mode 100644 index 4dbaad9810..0000000000 --- a/controllers/eventsource/cmd/start.go +++ /dev/null @@ -1,109 +0,0 @@ -package cmd - -import ( - "fmt" - "os" - "reflect" - - "go.uber.org/zap" - appv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/healthz" - "sigs.k8s.io/controller-runtime/pkg/manager/signals" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/source" - - argoevents "github.com/argoproj/argo-events" - "github.com/argoproj/argo-events/common" - "github.com/argoproj/argo-events/common/logging" - "github.com/argoproj/argo-events/controllers/eventsource" - eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" - eventsourcev1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" -) - -const ( - eventSourceImageEnvVar = "EVENTSOURCE_IMAGE" -) - -func Start(namespaced bool, managedNamespace string) { - logger := logging.NewArgoEventsLogger().Named(eventsource.ControllerName) - eventSourceImage, defined := os.LookupEnv(eventSourceImageEnvVar) - if !defined { - logger.Fatalf("required environment variable '%s' not defined", eventSourceImageEnvVar) - } - opts := ctrl.Options{ - MetricsBindAddress: fmt.Sprintf(":%d", common.ControllerMetricsPort), - HealthProbeBindAddress: ":8081", - } - if namespaced { - opts.Namespace = managedNamespace - } - mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), opts) - if err != nil { - logger.Fatalw("unable to get a controller-runtime manager", zap.Error(err)) - } - - // Readyness probe - if err := mgr.AddReadyzCheck("readiness", healthz.Ping); err != nil { - logger.Fatalw("unable add a readiness check", zap.Error(err)) - } - - // Liveness probe - if err := mgr.AddHealthzCheck("liveness", healthz.Ping); err != nil { - logger.Fatalw("unable add a health check", zap.Error(err)) - } - - if err := eventsourcev1alpha1.AddToScheme(mgr.GetScheme()); err != nil { - logger.Fatalw("unable to add EventSource 
scheme", zap.Error(err)) - } - - if err := eventbusv1alpha1.AddToScheme(mgr.GetScheme()); err != nil { - logger.Fatalw("unable to add EventBus scheme", zap.Error(err)) - } - - // A controller with DefaultControllerRateLimiter - c, err := controller.New(eventsource.ControllerName, mgr, controller.Options{ - Reconciler: eventsource.NewReconciler(mgr.GetClient(), mgr.GetScheme(), eventSourceImage, logger), - }) - if err != nil { - logger.Fatalw("unable to set up individual controller", zap.Error(err)) - } - - // Watch EventSource and enqueue EventSource object key - if err := c.Watch(&source.Kind{Type: &eventsourcev1alpha1.EventSource{}}, &handler.EnqueueRequestForObject{}, - predicate.Or( - predicate.GenerationChangedPredicate{}, - // TODO: change to use LabelChangedPredicate with controller-runtime v0.8 - predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { - if e.ObjectOld == nil { - return false - } - if e.ObjectNew == nil { - return false - } - return !reflect.DeepEqual(e.ObjectNew.GetLabels(), e.ObjectOld.GetLabels()) - }}, - )); err != nil { - logger.Fatalw("unable to watch EventSources", zap.Error(err)) - } - - // Watch Deployments and enqueue owning EventSource key - if err := c.Watch(&source.Kind{Type: &appv1.Deployment{}}, &handler.EnqueueRequestForOwner{OwnerType: &eventsourcev1alpha1.EventSource{}, IsController: true}, predicate.GenerationChangedPredicate{}); err != nil { - logger.Fatalw("unable to watch Deployments", zap.Error(err)) - } - - // Watch Services and enqueue owning EventSource key - if err := c.Watch(&source.Kind{Type: &corev1.Service{}}, &handler.EnqueueRequestForOwner{OwnerType: &eventsourcev1alpha1.EventSource{}, IsController: true}, predicate.GenerationChangedPredicate{}); err != nil { - logger.Fatalw("unable to watch Services", zap.Error(err)) - } - - logger.Infow("starting eventsource controller", "version", argoevents.GetVersion()) - if err := mgr.Start(signals.SetupSignalHandler()); err != nil { - logger.Fatalw("unable to run eventsource controller", zap.Error(err)) - } -} diff --git a/controllers/eventsource/controller.go b/controllers/eventsource/controller.go index 767c177046..e0e0ea2911 100644 --- a/controllers/eventsource/controller.go +++ b/controllers/eventsource/controller.go @@ -12,8 +12,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "github.com/argoproj/argo-events/codefresh" "github.com/argoproj/argo-events/common" + "github.com/argoproj/argo-events/common/logging" "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" + "github.com/pkg/errors" ) const ( @@ -29,11 +32,13 @@ type reconciler struct { eventSourceImage string logger *zap.SugaredLogger + + cfClient *codefresh.Client } // NewReconciler returns a new reconciler -func NewReconciler(client client.Client, scheme *runtime.Scheme, eventSourceImage string, logger *zap.SugaredLogger) reconcile.Reconciler { - return &reconciler{client: client, scheme: scheme, eventSourceImage: eventSourceImage, logger: logger} +func NewReconciler(client client.Client, scheme *runtime.Scheme, eventSourceImage string, logger *zap.SugaredLogger, cfClient *codefresh.Client) reconcile.Reconciler { + return &reconciler{client: client, scheme: scheme, eventSourceImage: eventSourceImage, logger: logger, cfClient: cfClient} } func (r *reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { @@ -47,13 +52,19 @@ func (r *reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu return 
ctrl.Result{}, err } log := r.logger.With("namespace", eventSource.Namespace).With("eventSource", eventSource.Name) + ctx = logging.WithLogger(ctx, log) esCopy := eventSource.DeepCopy() reconcileErr := r.reconcile(ctx, esCopy) if reconcileErr != nil { log.Errorw("reconcile error", zap.Error(reconcileErr)) + r.cfClient.ReportError(errors.Wrap(reconcileErr, "reconcile error"), codefresh.ErrorContext{ + ObjectMeta: eventSource.ObjectMeta, + TypeMeta: eventSource.TypeMeta, + }) } if r.needsUpdate(eventSource, esCopy) { - if err := r.client.Update(ctx, esCopy); err != nil { + // Use a DeepCopy to update, because it will be mutated afterwards, with empty Status. + if err := r.client.Update(ctx, esCopy.DeepCopy()); err != nil { return reconcile.Result{}, err } } @@ -65,7 +76,7 @@ func (r *reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu // reconcile does the real logic func (r *reconciler) reconcile(ctx context.Context, eventSource *v1alpha1.EventSource) error { - log := r.logger.With("namespace", eventSource.Namespace).With("eventSource", eventSource.Name) + log := logging.FromContext(ctx) if !eventSource.DeletionTimestamp.IsZero() { log.Info("deleting eventsource") if controllerutil.ContainsFinalizer(eventSource, finalizerName) { diff --git a/controllers/eventsource/controller_test.go b/controllers/eventsource/controller_test.go index eb763ef3d7..2b35bff9af 100644 --- a/controllers/eventsource/controller_test.go +++ b/controllers/eventsource/controller_test.go @@ -72,6 +72,52 @@ var ( }, }, } + + fakeEventBusJetstream = &eventbusv1alpha1.EventBus{ + TypeMeta: metav1.TypeMeta{ + APIVersion: eventbusv1alpha1.SchemeGroupVersion.String(), + Kind: "EventBus", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: testNamespace, + Name: common.DefaultEventBusName, + }, + Spec: eventbusv1alpha1.EventBusSpec{ + JetStream: &eventbusv1alpha1.JetStreamBus{ + Version: "x.x.x", + }, + }, + Status: eventbusv1alpha1.EventBusStatus{ + Config: eventbusv1alpha1.BusConfig{ + JetStream: &eventbusv1alpha1.JetStreamConfig{ + URL: "nats://xxxx", + }, + }, + }, + } + + fakeEventBusKafka = &eventbusv1alpha1.EventBus{ + TypeMeta: metav1.TypeMeta{ + APIVersion: eventbusv1alpha1.SchemeGroupVersion.String(), + Kind: "EventBus", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: testNamespace, + Name: common.DefaultEventBusName, + }, + Spec: eventbusv1alpha1.EventBusSpec{ + Kafka: &eventbusv1alpha1.KafkaBus{ + URL: "localhost:9092", + }, + }, + Status: eventbusv1alpha1.EventBusStatus{ + Config: eventbusv1alpha1.BusConfig{ + Kafka: &eventbusv1alpha1.KafkaBus{ + URL: "localhost:9092", + }, + }, + }, + } ) func fakeEmptyEventSource() *v1alpha1.EventSource { @@ -88,12 +134,14 @@ func fakeCalendarEventSourceMap(name string) map[string]v1alpha1.CalendarEventSo return map[string]v1alpha1.CalendarEventSource{name: {Schedule: "*/5 * * * *"}} } -func fakeWebhookEventSourceMap(name string) map[string]v1alpha1.WebhookContext { - return map[string]v1alpha1.WebhookContext{ +func fakeWebhookEventSourceMap(name string) map[string]v1alpha1.WebhookEventSource { + return map[string]v1alpha1.WebhookEventSource{ name: { - URL: "http://a.b", - Endpoint: "/abc", - Port: "1234", + WebhookContext: v1alpha1.WebhookContext{ + URL: "http://a.b", + Endpoint: "/abc", + Port: "1234", + }, }, } } diff --git a/controllers/eventsource/resource.go b/controllers/eventsource/resource.go index 83f93de945..01534795f2 100644 --- a/controllers/eventsource/resource.go +++ b/controllers/eventsource/resource.go @@ -5,9 +5,10 @@ import ( 
"encoding/base64" "encoding/json" "fmt" + "os" + "sort" "github.com/imdario/mergo" - "github.com/pkg/errors" "go.uber.org/zap" appv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -18,6 +19,7 @@ import ( "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/argoproj/argo-events/codefresh" "github.com/argoproj/argo-events/common" controllerscommon "github.com/argoproj/argo-events/controllers/common" eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" @@ -45,7 +47,7 @@ func Reconcile(client client.Client, args *AdaptorArgs, logger *zap.SugaredLogge if apierrors.IsNotFound(err) { eventSource.Status.MarkDeployFailed("EventBusNotFound", "EventBus not found.") logger.Errorw("EventBus not found", "eventBusName", eventBusName, "error", err) - return errors.Errorf("eventbus %s not found", eventBusName) + return fmt.Errorf("eventbus %s not found", eventBusName) } eventSource.Status.MarkDeployFailed("GetEventBusFailed", "Failed to get EventBus.") logger.Errorw("failed to get EventBus", "eventBusName", eventBusName, "error", err) @@ -54,8 +56,9 @@ func Reconcile(client client.Client, args *AdaptorArgs, logger *zap.SugaredLogge if !eventBus.Status.IsReady() { eventSource.Status.MarkDeployFailed("EventBusNotReady", "EventBus not ready.") logger.Errorw("event bus is not in ready status", "eventBusName", eventBusName, "error", err) - return errors.New("eventbus not ready") + return fmt.Errorf("eventbus not ready") } + expectedDeploy, err := buildDeployment(args, eventBus) if err != nil { eventSource.Status.MarkDeployFailed("BuildDeploymentSpecFailed", "Failed to build Deployment spec.") @@ -167,104 +170,127 @@ func buildDeployment(args *AdaptorArgs, eventBus *eventbusv1alpha1.EventBus) (*a return nil, err } eventSourceCopy := &v1alpha1.EventSource{ + TypeMeta: metav1.TypeMeta{ + Kind: args.EventSource.Kind, + APIVersion: args.EventSource.APIVersion, + }, ObjectMeta: metav1.ObjectMeta{ Namespace: args.EventSource.Namespace, Name: args.EventSource.Name, + Labels: common.CopyStringMap(args.EventSource.Labels), }, Spec: args.EventSource.Spec, } eventSourceBytes, err := json.Marshal(eventSourceCopy) if err != nil { - return nil, errors.New("failed marshal eventsource spec") + return nil, fmt.Errorf("failed marshal eventsource spec") + } + busConfigBytes, err := json.Marshal(eventBus.Status.Config) + if err != nil { + return nil, fmt.Errorf("failed marshal event bus config: %v", err) } - encodedEventSourceSpec := base64.StdEncoding.EncodeToString(eventSourceBytes) - envVars := []corev1.EnvVar{ + + env := []corev1.EnvVar{ { Name: common.EnvVarEventSourceObject, - Value: encodedEventSourceSpec, + Value: base64.StdEncoding.EncodeToString(eventSourceBytes), + }, + { + Name: codefresh.EnvVarShouldReportToCF, + Value: os.Getenv(codefresh.EnvVarShouldReportToCF), }, { Name: common.EnvVarEventBusSubject, Value: fmt.Sprintf("eventbus-%s", args.EventSource.Namespace), }, { - Name: "POD_NAME", + Name: common.EnvVarPodName, ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"}}, }, + { + Name: common.EnvVarLeaderElection, + Value: args.EventSource.Annotations[common.AnnotationLeaderElection], + }, + { + Name: common.EnvVarEventBusConfig, + Value: base64.StdEncoding.EncodeToString(busConfigBytes), + }, } - busConfigBytes, err := json.Marshal(eventBus.Status.Config) - if err != nil { - return nil, errors.Errorf("failed marshal event bus config: %v", err) - } - encodedBusConfig := 
base64.StdEncoding.EncodeToString(busConfigBytes) - envVars = append(envVars, corev1.EnvVar{Name: common.EnvVarEventBusConfig, Value: encodedBusConfig}) - if eventBus.Status.Config.NATS != nil { - volumes := deploymentSpec.Template.Spec.Volumes - volumeMounts := deploymentSpec.Template.Spec.Containers[0].VolumeMounts - emptyDirVolName := "tmp" - volumes = append(volumes, corev1.Volume{ - Name: emptyDirVolName, VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, - }) - volumeMounts = append(volumeMounts, corev1.VolumeMount{Name: emptyDirVolName, MountPath: "/tmp"}) + volumes := []corev1.Volume{ + { + Name: "tmp", + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, + }, + } - natsConf := eventBus.Status.Config.NATS - if natsConf.Auth != nil && natsConf.AccessSecret != nil { - // Mount the secret as volume instead of using evnFrom to gain the ability - // for the event source deployment to auto reload when the secret changes - volumes = append(volumes, corev1.Volume{ - Name: "auth-volume", - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: natsConf.AccessSecret.Name, - Items: []corev1.KeyToPath{ - { - Key: natsConf.AccessSecret.Key, - Path: "auth.yaml", - }, + volumeMounts := []corev1.VolumeMount{ + { + Name: "tmp", + MountPath: "/tmp", + }, + } + + var secretObjs []interface{} + var accessSecret *corev1.SecretKeySelector + switch { + case eventBus.Status.Config.NATS != nil: + accessSecret = eventBus.Status.Config.NATS.AccessSecret + secretObjs = []interface{}{eventSourceCopy} + case eventBus.Status.Config.JetStream != nil: + accessSecret = eventBus.Status.Config.JetStream.AccessSecret + secretObjs = []interface{}{eventSourceCopy} + case eventBus.Status.Config.Kafka != nil: + accessSecret = nil + secretObjs = []interface{}{eventSourceCopy, eventBus} // kafka requires secrets for sasl and tls + default: + return nil, fmt.Errorf("unsupported event bus") + } + + if accessSecret != nil { + // Mount the secret as volume instead of using envFrom to gain the ability + // for the event source deployment to auto reload when the secret changes + volumes = append(volumes, corev1.Volume{ + Name: "auth-volume", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: accessSecret.Name, + Items: []corev1.KeyToPath{ + { + Key: accessSecret.Key, + Path: "auth.yaml", + }, + }, + }, - }) - volumeMounts = append(volumeMounts, corev1.VolumeMount{Name: "auth-volume", MountPath: common.EventBusAuthFileMountPath}) - } - deploymentSpec.Template.Spec.Volumes = volumes - deploymentSpec.Template.Spec.Containers[0].VolumeMounts = volumeMounts - } else { - return nil, errors.New("unsupported event bus") + }, + }) + volumeMounts = append(volumeMounts, corev1.VolumeMount{ + Name: "auth-volume", + MountPath: common.EventBusAuthFileMountPath, + }) } - envs := deploymentSpec.Template.Spec.Containers[0].Env - envs = append(envs, envVars...) - deploymentSpec.Template.Spec.Containers[0].Env = envs + // secrets + volSecrets, volSecretMounts := common.VolumesFromSecretsOrConfigMaps(common.SecretKeySelectorType, secretObjs...) + volumes = append(volumes, volSecrets...) + volumeMounts = append(volumeMounts, volSecretMounts...) - vols := []corev1.Volume{} - volMounts := []corev1.VolumeMount{} - oldVols := deploymentSpec.Template.Spec.Volumes - oldVolMounts := deploymentSpec.Template.Spec.Containers[0].VolumeMounts - if len(oldVols) > 0 { - vols = append(vols, oldVols...)
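For both adaptors, the bus access secret is projected into the pod as a file named auth.yaml rather than injected via envFrom, so a rotated secret propagates to the running container without a restart. A minimal sketch of how an adaptor process could pick the file up; the /etc/eventbus/auth path here is an assumption standing in for whatever common.EventBusAuthFileMountPath resolves to:

```go
// Minimal sketch, not the adaptor's actual loader: read the projected
// auth.yaml from the event bus auth mount. "/etc/eventbus/auth" is an
// assumed stand-in for common.EventBusAuthFileMountPath.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func loadEventBusAuth(mountPath string) ([]byte, error) {
	// The controller projects the secret key to the fixed file name auth.yaml.
	data, err := os.ReadFile(filepath.Join(mountPath, "auth.yaml"))
	if err != nil {
		return nil, fmt.Errorf("read event bus auth file: %w", err)
	}
	return data, nil
}

func main() {
	if auth, err := loadEventBusAuth("/etc/eventbus/auth"); err == nil {
		fmt.Printf("loaded %d bytes of auth config\n", len(auth))
	}
}
```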
- } - if len(oldVolMounts) > 0 { - volMounts = append(volMounts, oldVolMounts...) - } - volSecrets, volSecretMounts := common.VolumesFromSecretsOrConfigMaps(eventSourceCopy, common.SecretKeySelectorType) - if len(volSecrets) > 0 { - vols = append(vols, volSecrets...) - } - if len(volSecretMounts) > 0 { - volMounts = append(volMounts, volSecretMounts...) - } - volConfigMaps, volCofigMapMounts := common.VolumesFromSecretsOrConfigMaps(eventSourceCopy, common.ConfigMapKeySelectorType) - if len(volConfigMaps) > 0 { - vols = append(vols, volConfigMaps...) - } - if len(volCofigMapMounts) > 0 { - volMounts = append(volMounts, volCofigMapMounts...) - } + // config maps + volConfigMaps, volConfigMapMounts := common.VolumesFromSecretsOrConfigMaps(common.ConfigMapKeySelectorType, eventSourceCopy) + volumeMounts = append(volumeMounts, volConfigMapMounts...) + volumes = append(volumes, volConfigMaps...) - deploymentSpec.Template.Spec.Volumes = vols - deploymentSpec.Template.Spec.Containers[0].VolumeMounts = volMounts + // Order volumes and volume mounts based on name to make the order deterministic + sort.Slice(volumes, func(i, j int) bool { + return volumes[i].Name < volumes[j].Name + }) + sort.Slice(volumeMounts, func(i, j int) bool { + return volumeMounts[i].Name < volumeMounts[j].Name + }) + + deploymentSpec.Template.Spec.Containers[0].Env = append(deploymentSpec.Template.Spec.Containers[0].Env, env...) + deploymentSpec.Template.Spec.Containers[0].VolumeMounts = append(deploymentSpec.Template.Spec.Containers[0].VolumeMounts, volumeMounts...) + deploymentSpec.Template.Spec.Volumes = append(deploymentSpec.Template.Spec.Volumes, volumes...) deployment := &appv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ @@ -277,13 +303,14 @@ func buildDeployment(args *AdaptorArgs, eventBus *eventbusv1alpha1.EventBus) (*a if err := controllerscommon.SetObjectMeta(args.EventSource, deployment, v1alpha1.SchemaGroupVersionKind); err != nil { return nil, err } + return deployment, nil } func buildDeploymentSpec(args *AdaptorArgs) (*appv1.DeploymentSpec, error) { eventSourceContainer := corev1.Container{ Image: args.Image, - ImagePullPolicy: corev1.PullAlways, + ImagePullPolicy: common.GetImagePullPolicy(), Args: []string{"eventsource-service"}, Ports: []corev1.ContainerPort{ {Name: "metrics", ContainerPort: common.EventSourceMetricsPort}, diff --git a/controllers/eventsource/resource_test.go b/controllers/eventsource/resource_test.go index dc2d2b5269..24f66e2530 100644 --- a/controllers/eventsource/resource_test.go +++ b/controllers/eventsource/resource_test.go @@ -12,6 +12,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" "github.com/argoproj/argo-events/common/logging" + apicommon "github.com/argoproj/argo-events/pkg/apis/common" + eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" ) @@ -69,6 +71,140 @@ func Test_BuildDeployment(t *testing.T) { assert.True(t, secretRefs > 0) assert.Equal(t, deployment.Spec.Template.Spec.PriorityClassName, "test-class") }) + + t.Run("test kafka eventbus secrets attached", func(t *testing.T) { + args := &AdaptorArgs{ + Image: testImage, + EventSource: testEventSource, + Labels: testLabels, + } + + // add secrets to kafka eventbus + testBus := fakeEventBusKafka.DeepCopy() + testBus.Spec.Kafka.TLS = &apicommon.TLSConfig{ + CACertSecret: &corev1.SecretKeySelector{Key: "cert", LocalObjectReference: corev1.LocalObjectReference{Name: "tls-secret"}}, + } + testBus.Spec.Kafka.SASL = 
&apicommon.SASLConfig{ + Mechanism: "SCRAM-SHA-512", + UserSecret: &corev1.SecretKeySelector{Key: "username", LocalObjectReference: corev1.LocalObjectReference{Name: "sasl-secret"}}, + PasswordSecret: &corev1.SecretKeySelector{Key: "password", LocalObjectReference: corev1.LocalObjectReference{Name: "sasl-secret"}}, + } + + deployment, err := buildDeployment(args, testBus) + assert.Nil(t, err) + assert.NotNil(t, deployment) + + hasSASLSecretVolume := false + hasSASLSecretVolumeMount := false + hasTLSSecretVolume := false + hasTLSSecretVolumeMount := false + for _, volume := range deployment.Spec.Template.Spec.Volumes { + if volume.Name == "secret-sasl-secret" { + hasSASLSecretVolume = true + } + if volume.Name == "secret-tls-secret" { + hasTLSSecretVolume = true + } + } + for _, volumeMount := range deployment.Spec.Template.Spec.Containers[0].VolumeMounts { + if volumeMount.Name == "secret-sasl-secret" { + hasSASLSecretVolumeMount = true + } + if volumeMount.Name == "secret-tls-secret" { + hasTLSSecretVolumeMount = true + } + } + + assert.True(t, hasSASLSecretVolume) + assert.True(t, hasSASLSecretVolumeMount) + assert.True(t, hasTLSSecretVolume) + assert.True(t, hasTLSSecretVolumeMount) + }) + + t.Run("test secret volume and volumemount order deterministic", func(t *testing.T) { + args := &AdaptorArgs{ + Image: testImage, + EventSource: testEventSource, + Labels: testLabels, + } + + webhooksWithSecrets := map[string]v1alpha1.WebhookEventSource{ + "webhook4": { + WebhookContext: v1alpha1.WebhookContext{ + URL: "http://a.b", + Endpoint: "/webhook4", + Port: "1234", + AuthSecret: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: "webhook4"}, + Key: "secret", + }, + }, + }, + "webhook3": { + WebhookContext: v1alpha1.WebhookContext{ + URL: "http://a.b", + Endpoint: "/webhook3", + Port: "1234", + AuthSecret: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: "webhook3"}, + Key: "secret", + }, + }, + }, + "webhook1": { + WebhookContext: v1alpha1.WebhookContext{ + URL: "http://a.b", + Endpoint: "/webhook1", + Port: "1234", + AuthSecret: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: "webhook1"}, + Key: "secret", + }, + }, + }, + "webhook2": { + WebhookContext: v1alpha1.WebhookContext{ + URL: "http://a.b", + Endpoint: "/webhook2", + Port: "1234", + AuthSecret: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: "webhook2"}, + Key: "secret", + }, + }, + }, + } + args.EventSource.Spec.Webhook = webhooksWithSecrets + + wantVolumeNames := []string{"auth-volume", "cm-test-cm", "secret-test-secret", "secret-webhook1", "secret-webhook2", "secret-webhook3", "secret-webhook4", "tmp"} + wantVolumeMountNames := []string{"auth-volume", "cm-test-cm", "secret-test-secret", "secret-webhook1", "secret-webhook2", "secret-webhook3", "secret-webhook4", "tmp"} + + deployment, err := buildDeployment(args, fakeEventBus) + assert.Nil(t, err) + assert.NotNil(t, deployment) + gotVolumes := deployment.Spec.Template.Spec.Volumes + gotVolumeMounts := deployment.Spec.Template.Spec.Containers[0].VolumeMounts + + var gotVolumeNames []string + var gotVolumeMountNames []string + + for _, v := range gotVolumes { + gotVolumeNames = append(gotVolumeNames, v.Name) + } + for _, v := range gotVolumeMounts { + gotVolumeMountNames = append(gotVolumeMountNames, v.Name) + } + + assert.Equal(t, len(gotVolumes), len(wantVolumeNames)) + assert.Equal(t, len(gotVolumeMounts), len(wantVolumeMountNames)) + + 
for i := range gotVolumeNames { + assert.Equal(t, gotVolumeNames[i], wantVolumeNames[i]) + } + for i := range gotVolumeMountNames { + assert.Equal(t, gotVolumeMountNames[i], wantVolumeMountNames[i]) + } + }) } func TestResourceReconcile(t *testing.T) { @@ -86,35 +222,38 @@ func TestResourceReconcile(t *testing.T) { assert.False(t, testEventSource.Status.IsReady()) }) - t.Run("test resource reconcile with eventbus", func(t *testing.T) { - ctx := context.TODO() - cl := fake.NewClientBuilder().Build() - testBus := fakeEventBus.DeepCopy() - testBus.Status.MarkDeployed("test", "test") - testBus.Status.MarkConfigured() - err := cl.Create(ctx, testBus) - assert.Nil(t, err) - args := &AdaptorArgs{ - Image: testImage, - EventSource: testEventSource, - Labels: testLabels, - } - err = Reconcile(cl, args, logging.NewArgoEventsLogger()) - assert.Nil(t, err) - assert.True(t, testEventSource.Status.IsReady()) + for _, eb := range []*eventbusv1alpha1.EventBus{fakeEventBus, fakeEventBusJetstream, fakeEventBusKafka} { + testBus := eb.DeepCopy() - deployList := &appv1.DeploymentList{} - err = cl.List(ctx, deployList, &client.ListOptions{ - Namespace: testNamespace, - }) - assert.NoError(t, err) - assert.Equal(t, 1, len(deployList.Items)) + t.Run("test resource reconcile with eventbus", func(t *testing.T) { + ctx := context.TODO() + cl := fake.NewClientBuilder().Build() + testBus.Status.MarkDeployed("test", "test") + testBus.Status.MarkConfigured() + err := cl.Create(ctx, testBus) + assert.Nil(t, err) + args := &AdaptorArgs{ + Image: testImage, + EventSource: testEventSource, + Labels: testLabels, + } + err = Reconcile(cl, args, logging.NewArgoEventsLogger()) + assert.Nil(t, err) + assert.True(t, testEventSource.Status.IsReady()) - svcList := &corev1.ServiceList{} - err = cl.List(ctx, svcList, &client.ListOptions{ - Namespace: testNamespace, + deployList := &appv1.DeploymentList{} + err = cl.List(ctx, deployList, &client.ListOptions{ + Namespace: testNamespace, + }) + assert.NoError(t, err) + assert.Equal(t, 1, len(deployList.Items)) + + svcList := &corev1.ServiceList{} + err = cl.List(ctx, svcList, &client.ListOptions{ + Namespace: testNamespace, + }) + assert.NoError(t, err) + assert.Equal(t, 0, len(svcList.Items)) }) - assert.NoError(t, err) - assert.Equal(t, 0, len(svcList.Items)) - }) + } } diff --git a/controllers/eventsource/validate.go b/controllers/eventsource/validate.go index 801cddac31..0a6b76491f 100644 --- a/controllers/eventsource/validate.go +++ b/controllers/eventsource/validate.go @@ -4,8 +4,6 @@ import ( "context" "fmt" - "github.com/pkg/errors" - "github.com/argoproj/argo-events/eventsources" apicommon "github.com/argoproj/argo-events/pkg/apis/common" "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" @@ -18,7 +16,7 @@ func ValidateEventSource(eventSource *v1alpha1.EventSource) error { recreateTypes[esType] = true } - servers := eventsources.GetEventingServers(eventSource, nil) + servers, _ := eventsources.GetEventingServers(eventSource, nil) eventNames := make(map[string]bool) rollingUpdates, recreates := 0, 0 @@ -37,7 +35,7 @@ func ValidateEventSource(eventSource *v1alpha1.EventSource) error { } else { // Duplicated event name not allowed in one EventSource, even they are in different EventSourceType. 
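The rule stated in that comment, that event names must be unique across every event source type in one spec, reduces to a set-membership check. A self-contained sketch of the shape of it (the helper name is illustrative, not the validator's actual function):

```go
// Self-contained sketch of the uniqueness rule: reject any event name that
// appears more than once across all event source types in one spec.
package main

import "fmt"

func checkDuplicateEventNames(names []string) error {
	seen := make(map[string]bool)
	for _, n := range names {
		if seen[n] {
			return fmt.Errorf("more than one %q found in the spec", n)
		}
		seen[n] = true
	}
	return nil
}

func main() {
	fmt.Println(checkDuplicateEventNames([]string{"example", "example"}))
}
```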
eventSource.Status.MarkSourcesNotProvided("InvalidEventSource", fmt.Sprintf("more than one \"%s\" found", eName)) - return errors.Errorf("more than one \"%s\" found in the spec", eName) + return fmt.Errorf("more than one %q found in the spec", eName) } err := server.ValidateEventSource(ctx) @@ -51,7 +49,7 @@ func ValidateEventSource(eventSource *v1alpha1.EventSource) error { if rollingUpdates > 0 && recreates > 0 { // We don't allow this as if we use recreate strategy for the deployment it will have downtime eventSource.Status.MarkSourcesNotProvided("InvalidEventSource", "Some types of event sources can not be put in one spec") - return errors.New("event sources with rolling update and recreate update strategy can not be put together") + return fmt.Errorf("event sources with rolling update and recreate update strategy can not be put together") } eventSource.Status.MarkSourcesProvided() diff --git a/controllers/sensor/cmd/start.go b/controllers/sensor/cmd/start.go deleted file mode 100644 index ede4511a9d..0000000000 --- a/controllers/sensor/cmd/start.go +++ /dev/null @@ -1,103 +0,0 @@ -package cmd - -import ( - "fmt" - "os" - "reflect" - - "go.uber.org/zap" - appv1 "k8s.io/api/apps/v1" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/healthz" - "sigs.k8s.io/controller-runtime/pkg/manager/signals" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/source" - - argoevents "github.com/argoproj/argo-events" - "github.com/argoproj/argo-events/common" - "github.com/argoproj/argo-events/common/logging" - "github.com/argoproj/argo-events/controllers/sensor" - eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" - sensorv1alpha1 "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" -) - -const ( - sensorImageEnvVar = "SENSOR_IMAGE" -) - -func Start(namespaced bool, managedNamespace string) { - logger := logging.NewArgoEventsLogger().Named(sensor.ControllerName) - sensorImage, defined := os.LookupEnv(sensorImageEnvVar) - if !defined { - logger.Fatalf("required environment variable '%s' not defined", sensorImageEnvVar) - } - opts := ctrl.Options{ - MetricsBindAddress: fmt.Sprintf(":%d", common.ControllerMetricsPort), - HealthProbeBindAddress: ":8081", - } - if namespaced { - opts.Namespace = managedNamespace - } - mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), opts) - if err != nil { - logger.Fatalw("unable to get a controller-runtime manager", zap.Error(err)) - } - - // Readyness probe - if err := mgr.AddReadyzCheck("readiness", healthz.Ping); err != nil { - logger.Fatalw("unable add a readiness check", zap.Error(err)) - } - - // Liveness probe - if err := mgr.AddHealthzCheck("liveness", healthz.Ping); err != nil { - logger.Fatalw("unable add a health check", zap.Error(err)) - } - - if err := sensorv1alpha1.AddToScheme(mgr.GetScheme()); err != nil { - logger.Fatalw("unable to add Sensor scheme", zap.Error(err)) - } - - if err := eventbusv1alpha1.AddToScheme(mgr.GetScheme()); err != nil { - logger.Fatalw("uunable to add EventBus scheme", zap.Error(err)) - } - - // A controller with DefaultControllerRateLimiter - c, err := controller.New(sensor.ControllerName, mgr, controller.Options{ - Reconciler: sensor.NewReconciler(mgr.GetClient(), mgr.GetScheme(), sensorImage, logger), - }) - if err != nil { - logger.Fatalw("unable to set up individual controller", 
zap.Error(err)) - } - - // Watch Sensor and enqueue Sensor object key - if err := c.Watch(&source.Kind{Type: &sensorv1alpha1.Sensor{}}, &handler.EnqueueRequestForObject{}, - predicate.Or( - predicate.GenerationChangedPredicate{}, - // TODO: change to use LabelChangedPredicate with controller-runtime v0.8 - predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { - if e.ObjectOld == nil { - return false - } - if e.ObjectNew == nil { - return false - } - return !reflect.DeepEqual(e.ObjectNew.GetLabels(), e.ObjectOld.GetLabels()) - }}, - )); err != nil { - logger.Fatalw("unable to watch Sensors", zap.Error(err)) - } - - // Watch Deployments and enqueue owning Sensor key - if err := c.Watch(&source.Kind{Type: &appv1.Deployment{}}, &handler.EnqueueRequestForOwner{OwnerType: &sensorv1alpha1.Sensor{}, IsController: true}, predicate.GenerationChangedPredicate{}); err != nil { - logger.Fatalw("unable to watch Deployments", zap.Error(err)) - } - - logger.Infow("starting sensor controller", "version", argoevents.GetVersion()) - if err := mgr.Start(signals.SetupSignalHandler()); err != nil { - logger.Fatalw("unable to run sensor controller", zap.Error(err)) - } -} diff --git a/controllers/sensor/controller.go b/controllers/sensor/controller.go index 93c735c7f2..f92fa10d1f 100644 --- a/controllers/sensor/controller.go +++ b/controllers/sensor/controller.go @@ -18,17 +18,23 @@ package sensor import ( "context" + "fmt" + "github.com/pkg/errors" "go.uber.org/zap" "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "github.com/argoproj/argo-events/codefresh" "github.com/argoproj/argo-events/common" + "github.com/argoproj/argo-events/common/logging" + eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" ) @@ -45,11 +51,13 @@ type reconciler struct { sensorImage string logger *zap.SugaredLogger + + cfClient *codefresh.Client } // NewReconciler returns a new reconciler -func NewReconciler(client client.Client, scheme *runtime.Scheme, sensorImage string, logger *zap.SugaredLogger) reconcile.Reconciler { - return &reconciler{client: client, scheme: scheme, sensorImage: sensorImage, logger: logger} +func NewReconciler(client client.Client, scheme *runtime.Scheme, sensorImage string, logger *zap.SugaredLogger, cfClient *codefresh.Client) reconcile.Reconciler { + return &reconciler{client: client, scheme: scheme, sensorImage: sensorImage, logger: logger, cfClient: cfClient} } func (r *reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { @@ -63,13 +71,19 @@ func (r *reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu return ctrl.Result{}, err } log := r.logger.With("namespace", sensor.Namespace).With("sensor", sensor.Name) + ctx = logging.WithLogger(ctx, log) sensorCopy := sensor.DeepCopy() reconcileErr := r.reconcile(ctx, sensorCopy) if reconcileErr != nil { log.Errorw("reconcile error", zap.Error(reconcileErr)) + r.cfClient.ReportError(errors.Wrap(reconcileErr, "reconcile error"), codefresh.ErrorContext{ + ObjectMeta: sensor.ObjectMeta, + TypeMeta: sensor.TypeMeta, + }) } if r.needsUpdate(sensor, sensorCopy) { - if err := r.client.Update(ctx, sensorCopy); err != nil { + // Use a 
DeepCopy to update, because it will be mutated afterwards, with empty Status. + if err := r.client.Update(ctx, sensorCopy.DeepCopy()); err != nil { return reconcile.Result{}, err } } @@ -81,7 +95,7 @@ func (r *reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu // reconcile does the real logic func (r *reconciler) reconcile(ctx context.Context, sensor *v1alpha1.Sensor) error { - log := r.logger.With("namespace", sensor.Namespace).With("sensor", sensor.Name) + log := logging.FromContext(ctx) if !sensor.DeletionTimestamp.IsZero() { log.Info("deleting sensor") if controllerutil.ContainsFinalizer(sensor, finalizerName) { @@ -93,7 +107,25 @@ func (r *reconciler) reconcile(ctx context.Context, sensor *v1alpha1.Sensor) err controllerutil.AddFinalizer(sensor, finalizerName) sensor.Status.InitConditions() - if err := ValidateSensor(sensor); err != nil { + + eventBus := &eventbusv1alpha1.EventBus{} + eventBusName := common.DefaultEventBusName + if len(sensor.Spec.EventBusName) > 0 { + eventBusName = sensor.Spec.EventBusName + } + err := r.client.Get(ctx, types.NamespacedName{Namespace: sensor.Namespace, Name: eventBusName}, eventBus) + if err != nil { + if apierrors.IsNotFound(err) { + sensor.Status.MarkDeployFailed("EventBusNotFound", "EventBus not found.") + log.Errorw("EventBus not found", "eventBusName", eventBusName, "error", err) + return fmt.Errorf("eventbus %s not found", eventBusName) + } + sensor.Status.MarkDeployFailed("GetEventBusFailed", "Failed to get EventBus.") + log.Errorw("failed to get EventBus", "eventBusName", eventBusName, "error", err) + return err + } + + if err := ValidateSensor(sensor, eventBus); err != nil { log.Errorw("validation error", "error", err) return err } @@ -106,7 +138,7 @@ func (r *reconciler) reconcile(ctx context.Context, sensor *v1alpha1.Sensor) err common.LabelOwnerName: sensor.Name, }, } - return Reconcile(r.client, args, log) + return Reconcile(r.client, eventBus, args, log) } func (r *reconciler) needsUpdate(old, new *v1alpha1.Sensor) bool { diff --git a/controllers/sensor/resource.go b/controllers/sensor/resource.go index a88c5f1f39..7da4e58ba1 100644 --- a/controllers/sensor/resource.go +++ b/controllers/sensor/resource.go @@ -21,9 +21,10 @@ import ( "encoding/base64" "encoding/json" "fmt" + "os" + "sort" "github.com/imdario/mergo" - "github.com/pkg/errors" "go.uber.org/zap" appv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -31,9 +32,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/argoproj/argo-events/codefresh" "github.com/argoproj/argo-events/common" controllerscommon "github.com/argoproj/argo-events/controllers/common" eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" @@ -48,30 +49,26 @@ type AdaptorArgs struct { } // Reconcile does the real logic -func Reconcile(client client.Client, args *AdaptorArgs, logger *zap.SugaredLogger) error { +func Reconcile(client client.Client, eventBus *eventbusv1alpha1.EventBus, args *AdaptorArgs, logger *zap.SugaredLogger) error { ctx := context.Background() sensor := args.Sensor - eventBus := &eventbusv1alpha1.EventBus{} + + if eventBus == nil { + sensor.Status.MarkDeployFailed("GetEventBusFailed", "Failed to get EventBus.") + logger.Error("failed to get EventBus") + return fmt.Errorf("failed to get EventBus") + } + eventBusName := common.DefaultEventBusName if 
len(sensor.Spec.EventBusName) > 0 { eventBusName = sensor.Spec.EventBusName } - err := client.Get(ctx, types.NamespacedName{Namespace: sensor.Namespace, Name: eventBusName}, eventBus) - if err != nil { - if apierrors.IsNotFound(err) { - sensor.Status.MarkDeployFailed("EventBusNotFound", "EventBus not found.") - logger.Errorw("EventBus not found", "eventBusName", eventBusName, "error", err) - return errors.Errorf("eventbus %s not found", eventBusName) - } - sensor.Status.MarkDeployFailed("GetEventBusFailed", "Failed to get EventBus.") - logger.Errorw("failed to get EventBus", "eventBusName", eventBusName, "error", err) - return err - } if !eventBus.Status.IsReady() { sensor.Status.MarkDeployFailed("EventBusNotReady", "EventBus not ready.") - logger.Errorw("event bus is not in ready status", "eventBusName", eventBusName, "error", err) - return errors.New("eventbus not ready") + logger.Errorw("event bus is not in ready status", "eventBusName", eventBusName) + return fmt.Errorf("eventbus not ready") } + expectedDeploy, err := buildDeployment(args, eventBus) if err != nil { sensor.Status.MarkDeployFailed("BuildDeploymentSpecFailed", "Failed to build Deployment spec.") @@ -135,107 +132,126 @@ func buildDeployment(args *AdaptorArgs, eventBus *eventbusv1alpha1.EventBus) (*a sensor := args.Sensor sensorCopy := &v1alpha1.Sensor{ TypeMeta: metav1.TypeMeta{ - Kind: sensor.Kind, + Kind: sensor.Kind, APIVersion: sensor.APIVersion, }, ObjectMeta: metav1.ObjectMeta{ Namespace: sensor.Namespace, Name: sensor.Name, + Labels: common.CopyStringMap(sensor.Labels), }, Spec: sensor.Spec, } sensorBytes, err := json.Marshal(sensorCopy) if err != nil { - return nil, errors.New("failed marshal sensor spec") + return nil, fmt.Errorf("failed marshal sensor spec") + } + busConfigBytes, err := json.Marshal(eventBus.Status.Config) + if err != nil { + return nil, fmt.Errorf("failed marshal event bus config: %v", err) } - encodedSensorSpec := base64.StdEncoding.EncodeToString(sensorBytes) - envVars := []corev1.EnvVar{ + + env := []corev1.EnvVar{ { Name: common.EnvVarSensorObject, - Value: encodedSensorSpec, + Value: base64.StdEncoding.EncodeToString(sensorBytes), + }, + { + Name: codefresh.EnvVarShouldReportToCF, + Value: os.Getenv(codefresh.EnvVarShouldReportToCF), }, { Name: common.EnvVarEventBusSubject, Value: fmt.Sprintf("eventbus-%s", sensor.Namespace), }, { - Name: "POD_NAME", + Name: common.EnvVarPodName, ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"}}, }, + { + Name: common.EnvVarLeaderElection, + Value: args.Sensor.Annotations[common.AnnotationLeaderElection], + }, + { + Name: common.EnvVarEventBusConfig, + Value: base64.StdEncoding.EncodeToString(busConfigBytes), + }, } - busConfigBytes, err := json.Marshal(eventBus.Status.Config) - if err != nil { - return nil, errors.Errorf("failed marshal event bus config: %v", err) + volumes := []corev1.Volume{ + { + Name: "tmp", + VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, + }, + } + + volumeMounts := []corev1.VolumeMount{ + { + Name: "tmp", + MountPath: "/tmp", + }, } - encodedBusConfig := base64.StdEncoding.EncodeToString(busConfigBytes) - envVars = append(envVars, corev1.EnvVar{Name: common.EnvVarEventBusConfig, Value: encodedBusConfig}) - if eventBus.Status.Config.NATS != nil { - volumes := deploymentSpec.Template.Spec.Volumes - volumeMounts := deploymentSpec.Template.Spec.Containers[0].VolumeMounts - emptyDirVolName := "tmp" - volumes = append(volumes, corev1.Volume{ - Name: 
emptyDirVolName, VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}, - }) - volumeMounts = append(volumeMounts, corev1.VolumeMount{Name: emptyDirVolName, MountPath: "/tmp"}) - natsConf := eventBus.Status.Config.NATS - if natsConf.Auth != nil && natsConf.AccessSecret != nil { - // Mount the secret as volume instead of using evnFrom to gain the ability - // for the sensor deployment to auto reload when the secret changes - volumes = append(volumes, corev1.Volume{ - Name: "auth-volume", - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: natsConf.AccessSecret.Name, - Items: []corev1.KeyToPath{ - { - Key: natsConf.AccessSecret.Key, - Path: "auth.yaml", - }, + var secretObjs []interface{} + var accessSecret *corev1.SecretKeySelector + switch { + case eventBus.Status.Config.NATS != nil: + accessSecret = eventBus.Status.Config.NATS.AccessSecret + secretObjs = []interface{}{sensorCopy} + case eventBus.Status.Config.JetStream != nil: + accessSecret = eventBus.Status.Config.JetStream.AccessSecret + secretObjs = []interface{}{sensorCopy} + case eventBus.Status.Config.Kafka != nil: + accessSecret = nil + secretObjs = []interface{}{sensorCopy, eventBus} // kafka requires secrets for sasl and tls + default: + return nil, fmt.Errorf("unsupported event bus") + } + + if accessSecret != nil { + // Mount the secret as volume instead of using envFrom to gain the ability + // for the sensor deployment to auto reload when the secret changes + volumes = append(volumes, corev1.Volume{ + Name: "auth-volume", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: accessSecret.Name, + Items: []corev1.KeyToPath{ + { + Key: accessSecret.Key, + Path: "auth.yaml", }, }, }, - }) - volumeMounts = append(volumeMounts, corev1.VolumeMount{Name: "auth-volume", MountPath: common.EventBusAuthFileMountPath}) - } - deploymentSpec.Template.Spec.Volumes = volumes - deploymentSpec.Template.Spec.Containers[0].VolumeMounts = volumeMounts - } else { - return nil, errors.New("unsupported event bus") + }, + }) + volumeMounts = append(volumeMounts, corev1.VolumeMount{ + Name: "auth-volume", + MountPath: common.EventBusAuthFileMountPath, + }) } - envs := deploymentSpec.Template.Spec.Containers[0].Env - envs = append(envs, envVars...) - deploymentSpec.Template.Spec.Containers[0].Env = envs + // secrets + volSecrets, volSecretMounts := common.VolumesFromSecretsOrConfigMaps(common.SecretKeySelectorType, secretObjs...) + volumes = append(volumes, volSecrets...) + volumeMounts = append(volumeMounts, volSecretMounts...) - vols := []corev1.Volume{} - volMounts := []corev1.VolumeMount{} - oldVols := deploymentSpec.Template.Spec.Volumes - oldVolMounts := deploymentSpec.Template.Spec.Containers[0].VolumeMounts - if len(oldVols) > 0 { - vols = append(vols, oldVols...) - } - if len(oldVolMounts) > 0 { - volMounts = append(volMounts, oldVolMounts...) - } - volSecrets, volSecretMounts := common.VolumesFromSecretsOrConfigMaps(sensorCopy, common.SecretKeySelectorType) - if len(volSecrets) > 0 { - vols = append(vols, volSecrets...) - } - if len(volSecretMounts) > 0 { - volMounts = append(volMounts, volSecretMounts...) - } - volConfigMaps, volCofigMapMounts := common.VolumesFromSecretsOrConfigMaps(sensorCopy, common.ConfigMapKeySelectorType) - if len(volConfigMaps) > 0 { - vols = append(vols, volConfigMaps...) - } - if len(volCofigMapMounts) > 0 { - volMounts = append(volMounts, volCofigMapMounts...) 
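The switch above carries the multi-bus support: NATS and JetStream each expose a single access secret, while Kafka keeps its SASL/TLS secret selectors on the bus object itself, which is why the bus is added to the set of objects scanned for secret references. A simplified sketch of that decision; the types here are illustrative stand-ins, not the real eventbusv1alpha1.BusConfig structs:

```go
// Simplified sketch of the per-bus secret-object selection; busConfig and
// its fields are stand-ins for the real API types.
package main

import "fmt"

type busConfig struct {
	NATS, JetStream, Kafka *struct{}
}

func secretObjects(cfg busConfig, adaptorObj, bus interface{}) ([]interface{}, error) {
	switch {
	case cfg.NATS != nil, cfg.JetStream != nil:
		// Only the adaptor object (sensor or event source) carries secret refs.
		return []interface{}{adaptorObj}, nil
	case cfg.Kafka != nil:
		// Kafka keeps SASL/TLS secret selectors on the bus spec itself.
		return []interface{}{adaptorObj, bus}, nil
	default:
		return nil, fmt.Errorf("unsupported event bus")
	}
}

func main() {
	objs, _ := secretObjects(busConfig{Kafka: &struct{}{}}, "sensor", "eventbus")
	fmt.Println(len(objs)) // 2
}
```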
- } + // config maps + volConfigMaps, volConfigMapMounts := common.VolumesFromSecretsOrConfigMaps(common.ConfigMapKeySelectorType, sensorCopy) + volumeMounts = append(volumeMounts, volConfigMapMounts...) + volumes = append(volumes, volConfigMaps...) + + // Order volumes and volume mounts based on name to make the order deterministic + sort.Slice(volumes, func(i, j int) bool { + return volumes[i].Name < volumes[j].Name + }) + sort.Slice(volumeMounts, func(i, j int) bool { + return volumeMounts[i].Name < volumeMounts[j].Name + }) - deploymentSpec.Template.Spec.Volumes = vols - deploymentSpec.Template.Spec.Containers[0].VolumeMounts = volMounts + deploymentSpec.Template.Spec.Containers[0].Env = append(deploymentSpec.Template.Spec.Containers[0].Env, env...) + deploymentSpec.Template.Spec.Containers[0].VolumeMounts = append(deploymentSpec.Template.Spec.Containers[0].VolumeMounts, volumeMounts...) + deploymentSpec.Template.Spec.Volumes = append(deploymentSpec.Template.Spec.Volumes, volumes...) deployment := &appv1.Deployment{ ObjectMeta: metav1.ObjectMeta{ @@ -248,6 +264,7 @@ func buildDeployment(args *AdaptorArgs, eventBus *eventbusv1alpha1.EventBus) (*a if err := controllerscommon.SetObjectMeta(sensor, deployment, v1alpha1.SchemaGroupVersionKind); err != nil { return nil, err } + return deployment, nil } @@ -255,7 +272,7 @@ func buildDeploymentSpec(args *AdaptorArgs) (*appv1.DeploymentSpec, error) { replicas := args.Sensor.Spec.GetReplicas() sensorContainer := corev1.Container{ Image: args.Image, - ImagePullPolicy: corev1.PullAlways, + ImagePullPolicy: common.GetImagePullPolicy(), Args: []string{"sensor-service"}, Ports: []corev1.ContainerPort{ {Name: "metrics", ContainerPort: common.SensorMetricsPort}, @@ -281,7 +298,8 @@ func buildDeploymentSpec(args *AdaptorArgs) (*appv1.DeploymentSpec, error) { Selector: &metav1.LabelSelector{ MatchLabels: args.Labels, }, - Replicas: &replicas, + Replicas: &replicas, + RevisionHistoryLimit: args.Sensor.Spec.RevisionHistoryLimit, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: podTemplateLabels, diff --git a/controllers/sensor/resource_test.go b/controllers/sensor/resource_test.go index 6a3fceae18..630c251f83 100644 --- a/controllers/sensor/resource_test.go +++ b/controllers/sensor/resource_test.go @@ -29,6 +29,7 @@ import ( "github.com/argoproj/argo-events/common" "github.com/argoproj/argo-events/common/logging" + apicommon "github.com/argoproj/argo-events/pkg/apis/common" eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" ) @@ -76,11 +77,6 @@ var ( Template: &v1alpha1.TriggerTemplate{ Name: "fake-trigger", K8s: &v1alpha1.StandardK8STrigger{ - GroupVersionResource: metav1.GroupVersionResource{ - Group: "k8s.io", - Version: "", - Resource: "pods", - }, Operation: "create", Source: &v1alpha1.ArtifactLocation{}, }, @@ -128,6 +124,52 @@ var ( }, }, } + + fakeEventBusJetstream = &eventbusv1alpha1.EventBus{ + TypeMeta: metav1.TypeMeta{ + APIVersion: eventbusv1alpha1.SchemeGroupVersion.String(), + Kind: "EventBus", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: testNamespace, + Name: common.DefaultEventBusName, + }, + Spec: eventbusv1alpha1.EventBusSpec{ + JetStream: &eventbusv1alpha1.JetStreamBus{ + Version: "x.x.x", + }, + }, + Status: eventbusv1alpha1.EventBusStatus{ + Config: eventbusv1alpha1.BusConfig{ + JetStream: &eventbusv1alpha1.JetStreamConfig{ + URL: "nats://xxxx", + }, + }, + }, + } + + fakeEventBusKafka = &eventbusv1alpha1.EventBus{
TypeMeta: metav1.TypeMeta{ + APIVersion: eventbusv1alpha1.SchemeGroupVersion.String(), + Kind: "EventBus", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: testNamespace, + Name: common.DefaultEventBusName, + }, + Spec: eventbusv1alpha1.EventBusSpec{ + Kafka: &eventbusv1alpha1.KafkaBus{ + URL: "localhost:9092", + }, + }, + Status: eventbusv1alpha1.EventBusStatus{ + Config: eventbusv1alpha1.BusConfig{ + Kafka: &eventbusv1alpha1.KafkaBus{ + URL: "localhost:9092", + }, + }, + }, + } ) func Test_BuildDeployment(t *testing.T) { @@ -156,51 +198,154 @@ func Test_BuildDeployment(t *testing.T) { assert.True(t, hasTmpVolume) assert.True(t, len(deployment.Spec.Template.Spec.ImagePullSecrets) > 0) assert.Equal(t, deployment.Spec.Template.Spec.PriorityClassName, "test-class") + assert.Nil(t, deployment.Spec.RevisionHistoryLimit) + }) + t.Run("test revisionHistoryLimit", func(t *testing.T) { + sensorWithRevisionHistoryLimit := sensorObj.DeepCopy() + sensorWithRevisionHistoryLimit.Spec.RevisionHistoryLimit = func() *int32 { i := int32(3); return &i }() + args := &AdaptorArgs{ + Image: testImage, + Sensor: sensorWithRevisionHistoryLimit, + Labels: testLabels, + } + deployment, err := buildDeployment(args, fakeEventBus) + assert.Nil(t, err) + assert.NotNil(t, deployment) + assert.Equal(t, int32(3), *deployment.Spec.RevisionHistoryLimit) }) -} -func TestResourceReconcile(t *testing.T) { - t.Run("test resource reconcile without eventbus", func(t *testing.T) { - cl := fake.NewClientBuilder().Build() + t.Run("test kafka eventbus secrets attached", func(t *testing.T) { args := &AdaptorArgs{ Image: testImage, Sensor: sensorObj, Labels: testLabels, } - err := Reconcile(cl, args, logging.NewArgoEventsLogger()) - assert.Error(t, err) - assert.False(t, sensorObj.Status.IsReady()) - }) - t.Run("test resource reconcile with eventbus", func(t *testing.T) { - ctx := context.TODO() - cl := fake.NewClientBuilder().Build() - testBus := fakeEventBus.DeepCopy() - testBus.Status.MarkDeployed("test", "test") - testBus.Status.MarkConfigured() - err := cl.Create(ctx, testBus) + // add secrets to kafka eventbus + testBus := fakeEventBusKafka.DeepCopy() + testBus.Spec.Kafka.TLS = &apicommon.TLSConfig{ + CACertSecret: &corev1.SecretKeySelector{Key: "cert", LocalObjectReference: corev1.LocalObjectReference{Name: "tls-secret"}}, + } + testBus.Spec.Kafka.SASL = &apicommon.SASLConfig{ + Mechanism: "SCRAM-SHA-512", + UserSecret: &corev1.SecretKeySelector{Key: "username", LocalObjectReference: corev1.LocalObjectReference{Name: "sasl-secret"}}, + PasswordSecret: &corev1.SecretKeySelector{Key: "password", LocalObjectReference: corev1.LocalObjectReference{Name: "sasl-secret"}}, + } + + deployment, err := buildDeployment(args, testBus) assert.Nil(t, err) + assert.NotNil(t, deployment) + + hasSASLSecretVolume := false + hasSASLSecretVolumeMount := false + hasTLSSecretVolume := false + hasTLSSecretVolumeMount := false + for _, volume := range deployment.Spec.Template.Spec.Volumes { + if volume.Name == "secret-sasl-secret" { + hasSASLSecretVolume = true + } + if volume.Name == "secret-tls-secret" { + hasTLSSecretVolume = true + } + } + for _, volumeMount := range deployment.Spec.Template.Spec.Containers[0].VolumeMounts { + if volumeMount.Name == "secret-sasl-secret" { + hasSASLSecretVolumeMount = true + } + if volumeMount.Name == "secret-tls-secret" { + hasTLSSecretVolumeMount = true + } + } + + assert.True(t, hasSASLSecretVolume) + assert.True(t, hasSASLSecretVolumeMount) + assert.True(t, hasTLSSecretVolume) + assert.True(t, 
hasTLSSecretVolumeMount) + }) + + t.Run("test secret volume and volumemount order deterministic", func(t *testing.T) { args := &AdaptorArgs{ Image: testImage, Sensor: sensorObj, Labels: testLabels, } - err = Reconcile(cl, args, logging.NewArgoEventsLogger()) + + wantVolumeNames := []string{"test-data", "auth-volume", "tmp"} + wantVolumeMountNames := []string{"test-data", "auth-volume", "tmp"} + + deployment, err := buildDeployment(args, fakeEventBus) assert.Nil(t, err) - assert.True(t, sensorObj.Status.IsReady()) + assert.NotNil(t, deployment) + gotVolumes := deployment.Spec.Template.Spec.Volumes + gotVolumeMounts := deployment.Spec.Template.Spec.Containers[0].VolumeMounts - deployList := &appv1.DeploymentList{} - err = cl.List(ctx, deployList, &client.ListOptions{ - Namespace: testNamespace, - }) - assert.NoError(t, err) - assert.Equal(t, 1, len(deployList.Items)) + var gotVolumeNames []string + var gotVolumeMountNames []string - svcList := &corev1.ServiceList{} - err = cl.List(ctx, svcList, &client.ListOptions{ - Namespace: testNamespace, - }) - assert.NoError(t, err) - assert.Equal(t, 0, len(svcList.Items)) + for _, v := range gotVolumes { + gotVolumeNames = append(gotVolumeNames, v.Name) + } + for _, v := range gotVolumeMounts { + gotVolumeMountNames = append(gotVolumeMountNames, v.Name) + } + + assert.Equal(t, len(gotVolumes), len(wantVolumeNames)) + assert.Equal(t, len(gotVolumeMounts), len(wantVolumeMountNames)) + + for i := range gotVolumeNames { + assert.Equal(t, gotVolumeNames[i], wantVolumeNames[i]) + } + for i := range gotVolumeMountNames { + assert.Equal(t, gotVolumeMountNames[i], wantVolumeMountNames[i]) + } }) } + +func TestResourceReconcile(t *testing.T) { + t.Run("test resource reconcile without eventbus", func(t *testing.T) { + cl := fake.NewClientBuilder().Build() + args := &AdaptorArgs{ + Image: testImage, + Sensor: sensorObj, + Labels: testLabels, + } + err := Reconcile(cl, nil, args, logging.NewArgoEventsLogger()) + assert.Error(t, err) + assert.False(t, sensorObj.Status.IsReady()) + }) + + for _, eb := range []*eventbusv1alpha1.EventBus{fakeEventBus, fakeEventBusJetstream, fakeEventBusKafka} { + testBus := eb.DeepCopy() + + t.Run("test resource reconcile with eventbus", func(t *testing.T) { + ctx := context.TODO() + cl := fake.NewClientBuilder().Build() + testBus.Status.MarkDeployed("test", "test") + testBus.Status.MarkConfigured() + err := cl.Create(ctx, testBus) + assert.Nil(t, err) + args := &AdaptorArgs{ + Image: testImage, + Sensor: sensorObj, + Labels: testLabels, + } + err = Reconcile(cl, testBus, args, logging.NewArgoEventsLogger()) + assert.Nil(t, err) + assert.True(t, sensorObj.Status.IsReady()) + + deployList := &appv1.DeploymentList{} + err = cl.List(ctx, deployList, &client.ListOptions{ + Namespace: testNamespace, + }) + assert.NoError(t, err) + assert.Equal(t, 1, len(deployList.Items)) + + svcList := &corev1.ServiceList{} + err = cl.List(ctx, svcList, &client.ListOptions{ + Namespace: testNamespace, + }) + assert.NoError(t, err) + assert.Equal(t, 0, len(svcList.Items)) + }) + } +} diff --git a/controllers/sensor/validate.go b/controllers/sensor/validate.go index d57f466c0d..e480e2bcb2 100644 --- a/controllers/sensor/validate.go +++ b/controllers/sensor/validate.go @@ -19,50 +19,36 @@ package sensor import ( "fmt" "net/http" - "strings" "time" - "github.com/Knetic/govaluate" + cronlib "github.com/robfig/cron/v3" + "github.com/argoproj/argo-events/common" + eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" 
"github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" - "github.com/pkg/errors" ) // ValidateSensor accepts a sensor and performs validation against it // we return an error so that it can be logged as a message on the sensor status // the error is ignored by the operation context as subsequent re-queues would produce the same error. // Exporting this function so that external APIs can use this to validate sensor resource. -func ValidateSensor(s *v1alpha1.Sensor) error { - if err := validateDependencies(s.Spec.Dependencies); err != nil { +func ValidateSensor(s *v1alpha1.Sensor, b *eventbusv1alpha1.EventBus) error { + if s == nil { + s.Status.MarkDependenciesNotProvided("InvalidSensor", "nil sensor") + return fmt.Errorf("nil sensor") + } + if b == nil { + s.Status.MarkDependenciesNotProvided("InvalidEventBus", "nil eventbus") + return fmt.Errorf("nil eventbus") + } + if err := validateDependencies(s.Spec.Dependencies, b); err != nil { s.Status.MarkDependenciesNotProvided("InvalidDependencies", err.Error()) return err } - // DEPRECATED. - if s.Spec.DeprecatedCircuit != "" { - if s.Spec.DependencyGroups == nil { - s.Status.MarkDependenciesNotProvided("InvalidCircuit", "Dependency groups not provided.") - return errors.New("dependency groups not provided") - } - c := strings.ReplaceAll(s.Spec.DeprecatedCircuit, "-", "\\-") - expression, err := govaluate.NewEvaluableExpression(c) - if err != nil { - s.Status.MarkDependenciesNotProvided("InvalidCircuit", "Invalid circurit expression.") - return errors.Errorf("circuit expression can't be created for dependency groups. err: %+v", err) - } - - groups := make(map[string]interface{}, len(s.Spec.DependencyGroups)) - for _, group := range s.Spec.DependencyGroups { - groups[group.Name] = false - } - if _, err = expression.Evaluate(groups); err != nil { - s.Status.MarkDependenciesNotProvided("InvalidCircuit", "Circuit expression can not be evaluated for dependency groups.") - return errors.Errorf("circuit expression can't be evaluated for dependency groups. 
err: %+v", err) - } - } s.Status.MarkDependenciesProvided() err := validateTriggers(s.Spec.Triggers) if err != nil { - s.Status.MarkTriggersNotProvided("InvalidTriggers", "Invalid triggers.") + s.Status.MarkTriggersNotProvided("InvalidTriggers", err.Error()) return err } s.Status.MarkTriggersProvided() @@ -72,19 +58,19 @@ func ValidateSensor(s *v1alpha1.Sensor) error { // validateTriggers validates triggers func validateTriggers(triggers []v1alpha1.Trigger) error { if len(triggers) < 1 { - return errors.Errorf("no triggers found") + return fmt.Errorf("no triggers found") } trigNames := make(map[string]bool) for _, trigger := range triggers { + if err := validateTriggerTemplate(trigger.Template); err != nil { + return err + } if _, ok := trigNames[trigger.Template.Name]; ok { return fmt.Errorf("duplicate trigger name: %s", trigger.Template.Name) } trigNames[trigger.Template.Name] = true - if err := validateTriggerTemplate(trigger.Template); err != nil { - return err - } if err := validateTriggerPolicy(&trigger); err != nil { return err } @@ -98,58 +84,73 @@ func validateTriggers(triggers []v1alpha1.Trigger) error { // validateTriggerTemplate validates trigger template func validateTriggerTemplate(template *v1alpha1.TriggerTemplate) error { if template == nil { - return errors.Errorf("trigger template can't be nil") + return fmt.Errorf("trigger template can't be nil") } if template.Name == "" { - return errors.Errorf("trigger must define a name") + return fmt.Errorf("trigger must define a name") } - // DEPRECATED. - if template.DeprecatedSwitch != nil && template.DeprecatedSwitch.All != nil && template.DeprecatedSwitch.Any != nil { - return errors.Errorf("trigger condition can't have both any and all condition") + if len(template.ConditionsReset) > 0 { + for _, c := range template.ConditionsReset { + if c.ByTime == nil { + return fmt.Errorf("invalid conditionsReset") + } + parser := cronlib.NewParser(cronlib.Minute | cronlib.Hour | cronlib.Dom | cronlib.Month | cronlib.Dow) + if _, err := parser.Parse(c.ByTime.Cron); err != nil { + return fmt.Errorf("invalid cron expression %q", c.ByTime.Cron) + } + if _, err := time.LoadLocation(c.ByTime.Timezone); err != nil { + return fmt.Errorf("invalid timezone %q", c.ByTime.Timezone) + } + } } if template.K8s != nil { if err := validateK8STrigger(template.K8s); err != nil { - return errors.Wrapf(err, "trigger for template %s is invalid", template.Name) + return fmt.Errorf("trigger for template %s is invalid, %w", template.Name, err) } } if template.ArgoWorkflow != nil { if err := validateArgoWorkflowTrigger(template.ArgoWorkflow); err != nil { - return errors.Wrapf(err, "template %s is invalid", template.Name) + return fmt.Errorf("template %s is invalid, %w", template.Name, err) } } if template.HTTP != nil { if err := validateHTTPTrigger(template.HTTP); err != nil { - return errors.Wrapf(err, "template %s is invalid", template.Name) + return fmt.Errorf("template %s is invalid, %w", template.Name, err) } } if template.AWSLambda != nil { if err := validateAWSLambdaTrigger(template.AWSLambda); err != nil { - return errors.Wrapf(err, "template %s is invalid", template.Name) + return fmt.Errorf("template %s is invalid, %w", template.Name, err) } } if template.Kafka != nil { if err := validateKafkaTrigger(template.Kafka); err != nil { - return errors.Wrapf(err, "template %s is invalid", template.Name) + return fmt.Errorf("template %s is invalid, %w", template.Name, err) } } if template.NATS != nil { if err := validateNATSTrigger(template.NATS); err != nil { 
- return errors.Wrapf(err, "template %s is invalid", template.Name) + return fmt.Errorf("template %s is invalid, %w", template.Name, err) } } if template.Slack != nil { if err := validateSlackTrigger(template.Slack); err != nil { - return errors.Wrapf(err, "template %s is invalid", template.Name) + return fmt.Errorf("template %s is invalid, %w", template.Name, err) } } if template.OpenWhisk != nil { if err := validateOpenWhiskTrigger(template.OpenWhisk); err != nil { - return errors.Wrapf(err, "template %s is invalid", template.Name) + return fmt.Errorf("template %s is invalid, %w", template.Name, err) } } if template.CustomTrigger != nil { if err := validateCustomTrigger(template.CustomTrigger); err != nil { - return errors.Wrapf(err, "template %s is invalid", template.Name) + return fmt.Errorf("template %s is invalid, %w", template.Name, err) + } + } + if template.Email != nil { + if err := validateEmailTrigger(template.Email); err != nil { + return fmt.Errorf("template %s is invalid, %w", template.Name, err) } } return nil @@ -158,24 +159,22 @@ func validateTriggerTemplate(template *v1alpha1.TriggerTemplate) error { // validateK8STrigger validates a kubernetes trigger func validateK8STrigger(trigger *v1alpha1.StandardK8STrigger) error { if trigger == nil { - return errors.New("k8s trigger can't be nil") + return fmt.Errorf("k8s trigger can't be nil") } if trigger.Source == nil { - return errors.New("k8s trigger does not contain an absolute action") - } - if trigger.GroupVersionResource.Size() == 0 { - return errors.New("must provide group, version and resource for the resource") + return fmt.Errorf("k8s trigger does not contain an absolute action") } + switch trigger.Operation { case "", v1alpha1.Create, v1alpha1.Patch, v1alpha1.Update, v1alpha1.Delete: default: - return errors.Errorf("unknown operation type %s", string(trigger.Operation)) + return fmt.Errorf("unknown operation type %s", string(trigger.Operation)) } if trigger.Parameters != nil { for i, parameter := range trigger.Parameters { if err := validateTriggerParameter(&parameter); err != nil { - return errors.Errorf("resource parameter index: %d. err: %+v", i, err) + return fmt.Errorf("resource parameter index: %d. 
err: %w", i, err) } } } @@ -185,23 +184,21 @@ func validateK8STrigger(trigger *v1alpha1.StandardK8STrigger) error { // validateArgoWorkflowTrigger validates an Argo workflow trigger func validateArgoWorkflowTrigger(trigger *v1alpha1.ArgoWorkflowTrigger) error { if trigger == nil { - return errors.New("argoWorkflow trigger can't be nil") + return fmt.Errorf("argoWorkflow trigger can't be nil") } if trigger.Source == nil { - return errors.New("argoWorkflow trigger does not contain an absolute action") - } - if trigger.GroupVersionResource.Size() == 0 { - return errors.New("must provide group, version and resource for the resource") + return fmt.Errorf("argoWorkflow trigger does not contain an absolute action") } + switch trigger.Operation { - case v1alpha1.Submit, v1alpha1.Suspend, v1alpha1.Retry, v1alpha1.Resume, v1alpha1.Resubmit, v1alpha1.Terminate: + case v1alpha1.Submit, v1alpha1.SubmitFrom, v1alpha1.Suspend, v1alpha1.Retry, v1alpha1.Resume, v1alpha1.Resubmit, v1alpha1.Terminate, v1alpha1.Stop: default: - return errors.Errorf("unknown operation type %s", string(trigger.Operation)) + return fmt.Errorf("unknown operation type %s", string(trigger.Operation)) } if trigger.Parameters != nil { for i, parameter := range trigger.Parameters { if err := validateTriggerParameter(¶meter); err != nil { - return errors.Errorf("resource parameter index: %d. err: %+v", i, err) + return fmt.Errorf("resource parameter index: %d. err: %w", i, err) } } } @@ -211,29 +208,29 @@ func validateArgoWorkflowTrigger(trigger *v1alpha1.ArgoWorkflowTrigger) error { // validateHTTPTrigger validates the HTTP trigger func validateHTTPTrigger(trigger *v1alpha1.HTTPTrigger) error { if trigger == nil { - return errors.New("HTTP trigger for can't be nil") + return fmt.Errorf("HTTP trigger for can't be nil") } if trigger.URL == "" { - return errors.New("server URL is not specified") + return fmt.Errorf("server URL is not specified") } if trigger.Method != "" { switch trigger.Method { case http.MethodGet, http.MethodDelete, http.MethodPatch, http.MethodPost, http.MethodPut: default: - return errors.New("only GET, DELETE, PATCH, POST and PUT methods are supported") + return fmt.Errorf("only GET, DELETE, PATCH, POST and PUT methods are supported") } } if trigger.Parameters != nil { for i, parameter := range trigger.Parameters { if err := validateTriggerParameter(¶meter); err != nil { - return errors.Errorf("resource parameter index: %d. err: %+v", i, err) + return fmt.Errorf("resource parameter index: %d. err: %w", i, err) } } } if trigger.Payload != nil { for i, p := range trigger.Payload { if err := validateTriggerParameter(&p); err != nil { - return errors.Errorf("payload index: %d. err: %+v", i, err) + return fmt.Errorf("payload index: %d. 
err: %w", i, err) } } } @@ -243,33 +240,33 @@ func validateHTTPTrigger(trigger *v1alpha1.HTTPTrigger) error { // validateOpenWhiskTrigger validates the OpenWhisk trigger func validateOpenWhiskTrigger(trigger *v1alpha1.OpenWhiskTrigger) error { if trigger == nil { - return errors.New("openwhisk trigger for can't be nil") + return fmt.Errorf("openwhisk trigger for can't be nil") } if trigger.ActionName == "" { - return errors.New("action name is not specified") + return fmt.Errorf("action name is not specified") } if trigger.Host == "" { - return errors.New("host URL is not specified") + return fmt.Errorf("host URL is not specified") } if trigger.AuthToken != nil { if trigger.AuthToken.Name == "" || trigger.AuthToken.Key == "" { - return errors.New("auth token key and name must be specified") + return fmt.Errorf("auth token key and name must be specified") } } if trigger.Parameters != nil { for i, parameter := range trigger.Parameters { if err := validateTriggerParameter(¶meter); err != nil { - return errors.Errorf("resource parameter index: %d. err: %+v", i, err) + return fmt.Errorf("resource parameter index: %d. err: %w", i, err) } } } if trigger.Payload == nil { - return errors.New("payload parameters are not specified") + return fmt.Errorf("payload parameters are not specified") } if trigger.Payload != nil { for i, p := range trigger.Payload { if err := validateTriggerParameter(&p); err != nil { - return errors.Errorf("resource parameter index: %d. err: %+v", i, err) + return fmt.Errorf("resource parameter index: %d. err: %w", i, err) } } } @@ -279,31 +276,28 @@ func validateOpenWhiskTrigger(trigger *v1alpha1.OpenWhiskTrigger) error { // validateAWSLambdaTrigger validates the AWS Lambda trigger func validateAWSLambdaTrigger(trigger *v1alpha1.AWSLambdaTrigger) error { if trigger == nil { - return errors.New("openfaas trigger for can't be nil") + return fmt.Errorf("openfaas trigger for can't be nil") } if trigger.FunctionName == "" { - return errors.New("function name is not specified") + return fmt.Errorf("function name is not specified") } if trigger.Region == "" { - return errors.New("region in not specified") - } - if trigger.AccessKey == nil || trigger.SecretKey == nil { - return errors.New("either accesskey or secretkey secret selector is not specified") + return fmt.Errorf("region in not specified") } if trigger.Payload == nil { - return errors.New("payload parameters are not specified") + return fmt.Errorf("payload parameters are not specified") } if trigger.Parameters != nil { for i, parameter := range trigger.Parameters { if err := validateTriggerParameter(¶meter); err != nil { - return errors.Errorf("resource parameter index: %d. err: %+v", i, err) + return fmt.Errorf("resource parameter index: %d. err: %w", i, err) } } } if trigger.Payload != nil { for i, p := range trigger.Payload { if err := validateTriggerParameter(&p); err != nil { - return errors.Errorf("payload index: %d. err: %+v", i, err) + return fmt.Errorf("payload index: %d. err: %w", i, err) } } } @@ -313,21 +307,21 @@ func validateAWSLambdaTrigger(trigger *v1alpha1.AWSLambdaTrigger) error { // validateKafkaTrigger validates the kafka trigger. 
func validateKafkaTrigger(trigger *v1alpha1.KafkaTrigger) error { if trigger == nil { - return errors.New("trigger can't be nil") + return fmt.Errorf("trigger can't be nil") } if trigger.URL == "" { - return errors.New("broker url must not be empty") + return fmt.Errorf("broker url must not be empty") } if trigger.Payload == nil { - return errors.New("payload must not be empty") + return fmt.Errorf("payload must not be empty") } if trigger.Topic == "" { - return errors.New("topic must not be empty") + return fmt.Errorf("topic must not be empty") } if trigger.Payload != nil { for i, p := range trigger.Payload { if err := validateTriggerParameter(&p); err != nil { - return errors.Errorf("payload index: %d. err: %+v", i, err) + return fmt.Errorf("payload index: %d. err: %w", i, err) } } } @@ -337,21 +331,21 @@ func validateKafkaTrigger(trigger *v1alpha1.KafkaTrigger) error { // validateNATSTrigger validates the NATS trigger. func validateNATSTrigger(trigger *v1alpha1.NATSTrigger) error { if trigger == nil { - return errors.New("trigger can't be nil") + return fmt.Errorf("trigger can't be nil") } if trigger.URL == "" { - return errors.New("nats server url can't be empty") + return fmt.Errorf("nats server url can't be empty") } if trigger.Subject == "" { - return errors.New("nats subject can't be empty") + return fmt.Errorf("nats subject can't be empty") } if trigger.Payload == nil { - return errors.New("payload can't be nil") + return fmt.Errorf("payload can't be nil") } if trigger.Payload != nil { for i, p := range trigger.Payload { if err := validateTriggerParameter(&p); err != nil { - return errors.Errorf("payload index: %d. err: %+v", i, err) + return fmt.Errorf("payload index: %d. err: %w", i, err) } } } @@ -361,15 +355,36 @@ func validateNATSTrigger(trigger *v1alpha1.NATSTrigger) error { // validateSlackTrigger validates the Slack trigger. func validateSlackTrigger(trigger *v1alpha1.SlackTrigger) error { if trigger == nil { - return errors.New("trigger can't be nil") + return fmt.Errorf("trigger can't be nil") } if trigger.SlackToken == nil { - return errors.New("slack token can't be empty") + return fmt.Errorf("slack token can't be empty") } if trigger.Parameters != nil { for i, parameter := range trigger.Parameters { if err := validateTriggerParameter(&parameter); err != nil { - return errors.Errorf("resource parameter index: %d. err: %+v", i, err) + return fmt.Errorf("resource parameter index: %d. err: %w", i, err) + } + } + } + return nil +} + +// validateEmailTrigger validates the Email trigger +func validateEmailTrigger(trigger *v1alpha1.EmailTrigger) error { + if trigger == nil { + return fmt.Errorf("trigger can't be nil") + } + if trigger.Host == "" { + return fmt.Errorf("host can't be empty") + } + if 0 > trigger.Port || trigger.Port > 65535 { + return fmt.Errorf("port: %v, port should be between 0-65535", trigger.Port) + } + if trigger.Parameters != nil { + for i, parameter := range trigger.Parameters { + if err := validateTriggerParameter(&parameter); err != nil { + return fmt.Errorf("resource parameter index: %d. err: %w", i, err) } } } @@ -379,23 +394,23 @@ func validateSlackTrigger(trigger *v1alpha1.SlackTrigger) error { // validateCustomTrigger validates the custom trigger. 
func validateCustomTrigger(trigger *v1alpha1.CustomTrigger) error { if trigger == nil { - return errors.New("custom trigger for can't be nil") + return fmt.Errorf("custom trigger can't be nil") } if trigger.ServerURL == "" { - return errors.New("custom trigger gRPC server url is not defined") + return fmt.Errorf("custom trigger gRPC server url is not defined") } if trigger.Spec == nil { - return errors.New("trigger body can't be empty") + return fmt.Errorf("trigger body can't be empty") } if trigger.Secure { - if trigger.CertSecret == nil && trigger.DeprecatedCertFilePath == "" { - return errors.New("certSecret can't be nil when the trigger server connection is secure") + if trigger.CertSecret == nil { + return fmt.Errorf("certSecret can't be nil when the trigger server connection is secure") } } if trigger.Parameters != nil { for i, parameter := range trigger.Parameters { if err := validateTriggerParameter(&parameter); err != nil { - return errors.Errorf("resource parameter index: %d. err: %+v", i, err) + return fmt.Errorf("resource parameter index: %d. err: %w", i, err) } } } @@ -407,7 +422,7 @@ func validateTriggerTemplateParameters(trigger *v1alpha1.Trigger) error { if trigger.Parameters != nil { for i, parameter := range trigger.Parameters { if err := validateTriggerParameter(&parameter); err != nil { - return errors.Errorf("template parameter index: %d. err: %+v", i, err) + return fmt.Errorf("template parameter index: %d. err: %w", i, err) } } } @@ -417,13 +432,13 @@ func validateTriggerTemplateParameters(trigger *v1alpha1.Trigger) error { // validateTriggerParameter validates a trigger parameter func validateTriggerParameter(parameter *v1alpha1.TriggerParameter) error { if parameter.Src == nil { - return errors.Errorf("parameter source can't be empty") + return fmt.Errorf("parameter source can't be empty") } if parameter.Src.DependencyName == "" { - return errors.Errorf("parameter dependency name can't be empty") + return fmt.Errorf("parameter dependency name can't be empty") } if parameter.Dest == "" { - return errors.Errorf("parameter destination can't be empty") + return fmt.Errorf("parameter destination can't be empty") } switch op := parameter.Operation; op { @@ -432,47 +447,111 @@ func validateTriggerParameter(parameter *v1alpha1.TriggerParameter) error { case v1alpha1.TriggerParameterOpPrepend: case v1alpha1.TriggerParameterOpNone: default: - return errors.Errorf("parameter operation %+v is invalid", op) + return fmt.Errorf("parameter operation %+v is invalid", op) } return nil } // perform a check to see that each event dependency is in correct format and has valid filters set if any -func validateDependencies(eventDependencies []v1alpha1.EventDependency) error { +func validateDependencies(eventDependencies []v1alpha1.EventDependency, b *eventbusv1alpha1.EventBus) error { if len(eventDependencies) < 1 { - return errors.New("no event dependencies found") + return fmt.Errorf("no event dependencies found") } + comboKeys := make(map[string]bool) for _, dep := range eventDependencies { if dep.Name == "" { - return errors.New("event dependency must define a name") + return fmt.Errorf("event dependency must define a name") } if dep.EventSourceName == "" { - return errors.New("event dependency must define the EventSourceName") + return fmt.Errorf("event dependency must define the EventSourceName") } if dep.EventName == "" { - return errors.New("event dependency must define the EventName") + return fmt.Errorf("event dependency must define the EventName") } - // EventSourceName + 
EventName can not be referenced more than once in one Sensor object. - comboKey := fmt.Sprintf("%s-$$$-%s", dep.EventSourceName, dep.EventName) - if _, existing := comboKeys[comboKey]; existing { - return errors.Errorf("%s and %s are referenced more than once in this Sensor object", dep.EventSourceName, dep.EventName) + if b.Spec.NATS != nil { + // For STAN, EventSourceName + EventName cannot be referenced more than once in one Sensor object. + comboKey := fmt.Sprintf("%s-$$$-%s", dep.EventSourceName, dep.EventName) + if _, existing := comboKeys[comboKey]; existing { + return fmt.Errorf("event '%s' from EventSource '%s' is referenced for more than one dependency in this Sensor object", dep.EventName, dep.EventSourceName) + } + comboKeys[comboKey] = true } - comboKeys[comboKey] = true + if err := validateEventFilter(dep.Filters); err != nil { return err } + + if err := validateLogicalOperator(dep.FiltersLogicalOperator); err != nil { + return err + } } return nil } +// validateLogicalOperator verifies that the logical operator in input is equal to a supported value +func validateLogicalOperator(logOp v1alpha1.LogicalOperator) error { + if logOp != v1alpha1.AndLogicalOperator && + logOp != v1alpha1.OrLogicalOperator && + logOp != v1alpha1.EmptyLogicalOperator { + return fmt.Errorf("logical operator %s not supported", logOp) + } + return nil +} + +// validateComparator verifies that the comparator in input is equal to a supported value +func validateComparator(comp v1alpha1.Comparator) error { + if comp != v1alpha1.GreaterThanOrEqualTo && + comp != v1alpha1.GreaterThan && + comp != v1alpha1.EqualTo && + comp != v1alpha1.NotEqualTo && + comp != v1alpha1.LessThan && + comp != v1alpha1.LessThanOrEqualTo && + comp != v1alpha1.EmptyComparator { + return fmt.Errorf("comparator %s not supported", comp) + } + + return nil +} + // validateEventFilter for a sensor func validateEventFilter(filter *v1alpha1.EventDependencyFilter) error { if filter == nil { return nil } + + if err := validateLogicalOperator(filter.ExprLogicalOperator); err != nil { + return err + } + + if err := validateLogicalOperator(filter.DataLogicalOperator); err != nil { + return err + } + + if filter.Exprs != nil { + for _, expr := range filter.Exprs { + if err := validateEventExprFilter(&expr); err != nil { + return err + } + } + } + + if filter.Data != nil { + for _, data := range filter.Data { + if err := validateEventDataFilter(&data); err != nil { + return err + } + } + } + + if filter.Context != nil { + if err := validateEventCtxFilter(filter.Context); err != nil { + return err + } + } + if filter.Time != nil { if err := validateEventTimeFilter(filter.Time); err != nil { return err @@ -481,21 +560,72 @@ func validateEventFilter(filter *v1alpha1.EventDependencyFilter) error { return nil } +// validateEventExprFilter validates expr filter +func validateEventExprFilter(exprFilter *v1alpha1.ExprFilter) error { + if exprFilter.Expr == "" || + len(exprFilter.Fields) == 0 { + return fmt.Errorf("one of expr filters is not valid (expr and fields must not be empty)") + } + + for _, fld := range exprFilter.Fields { + if fld.Path == "" || fld.Name == "" { + return fmt.Errorf("one of expr filters is not valid (path and name in a field must not be empty)") + } + } + + return nil +} + +// validateEventDataFilter validates data filter +func validateEventDataFilter(dataFilter *v1alpha1.DataFilter) error { + if dataFilter.Comparator != v1alpha1.EmptyComparator { + if err := validateComparator(dataFilter.Comparator); err != nil { + 
return err + } + } + + if dataFilter.Path == "" || + dataFilter.Type == "" || + len(dataFilter.Value) == 0 { + return fmt.Errorf("one of data filters is not valid (type, path and value must not be empty)") + } + + for _, val := range dataFilter.Value { + if val == "" { + return fmt.Errorf("one of data filters is not valid (value must not be empty)") + } + } + + return nil +} + +// validateEventCtxFilter validates context filter +func validateEventCtxFilter(ctxFilter *v1alpha1.EventContext) error { + if ctxFilter.Type == "" && + ctxFilter.Subject == "" && + ctxFilter.Source == "" && + ctxFilter.DataContentType == "" { + return fmt.Errorf("no fields specified in ctx filter (aka all events will be discarded)") + } + return nil +} + // validateEventTimeFilter validates time filter -func validateEventTimeFilter(tFilter *v1alpha1.TimeFilter) error { +func validateEventTimeFilter(timeFilter *v1alpha1.TimeFilter) error { now := time.Now().UTC() + // Parse start and stop - startTime, err := common.ParseTime(tFilter.Start, now) - if err != nil { - return err + startTime, startErr := common.ParseTime(timeFilter.Start, now) + if startErr != nil { + return startErr } - stopTime, err := common.ParseTime(tFilter.Stop, now) - if err != nil { - return err + stopTime, stopErr := common.ParseTime(timeFilter.Stop, now) + if stopErr != nil { + return stopErr } if stopTime.Equal(startTime) { - return errors.Errorf("invalid event time filter: stop '%s' is equal to start '%s", tFilter.Stop, tFilter.Start) + return fmt.Errorf("invalid event time filter: stop '%s' is equal to start '%s'", timeFilter.Stop, timeFilter.Start) } return nil } @@ -526,7 +656,7 @@ func validateK8sTriggerPolicy(policy *v1alpha1.K8SResourcePolicy) error { return nil } if policy.Labels == nil { - return errors.New("resource labels are not specified") + return fmt.Errorf("resource labels are not specified") } return nil } @@ -537,7 +667,7 @@ func validateStatusPolicy(policy *v1alpha1.StatusPolicy) error { return nil } if policy.Allow == nil { - return errors.New("list of allowed response status is not specified") + return fmt.Errorf("list of allowed response status is not specified") } return nil } diff --git a/controllers/sensor/validate_test.go b/controllers/sensor/validate_test.go index d31a02a9e7..ec3e4027a1 100644 --- a/controllers/sensor/validate_test.go +++ b/controllers/sensor/validate_test.go @@ -18,42 +18,68 @@ package sensor import ( "fmt" - "io/ioutil" + "os" "strings" "testing" + eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" "github.com/ghodss/yaml" "github.com/stretchr/testify/assert" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/stretchr/testify/require" ) func TestValidateSensor(t *testing.T) { dir := "../../examples/sensors" - files, err := ioutil.ReadDir(dir) - assert.Nil(t, err) - for _, file := range files { - content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", dir, file.Name())) - assert.Nil(t, err) - var sensor *v1alpha1.Sensor - err = yaml.Unmarshal(content, &sensor) - assert.Nil(t, err) - err = ValidateSensor(sensor) - assert.Nil(t, err) + dirEntries, err := os.ReadDir(dir) + require.NoError(t, err) + + for _, entry := range dirEntries { + if entry.IsDir() { + continue + } + t.Run( + fmt.Sprintf("test example load: %s/%s", dir, entry.Name()), + func(t *testing.T) { + content, err := os.ReadFile(fmt.Sprintf("%s/%s", dir, entry.Name())) + assert.NoError(t, err) + + var sensor *v1alpha1.Sensor + eventBus := 
&eventbusv1alpha1.EventBus{Spec: eventbusv1alpha1.EventBusSpec{JetStream: &eventbusv1alpha1.JetStreamBus{}}} + err = yaml.Unmarshal(content, &sensor) + assert.NoError(t, err) + + err = ValidateSensor(sensor, eventBus) + assert.NoError(t, err) + }) } } -func TestValidDepencies(t *testing.T) { - t.Run("test duplicate deps", func(t *testing.T) { +func TestValidDependencies(t *testing.T) { + jetstreamBus := &eventbusv1alpha1.EventBus{Spec: eventbusv1alpha1.EventBusSpec{JetStream: &eventbusv1alpha1.JetStreamBus{}}} + stanBus := &eventbusv1alpha1.EventBus{Spec: eventbusv1alpha1.EventBusSpec{NATS: &eventbusv1alpha1.NATSBus{}}} + + t.Run("test duplicate deps fail for STAN", func(t *testing.T) { sObj := sensorObj.DeepCopy() sObj.Spec.Dependencies = append(sObj.Spec.Dependencies, v1alpha1.EventDependency{ Name: "fake-dep2", EventSourceName: "fake-source", EventName: "fake-one", }) - err := ValidateSensor(sObj) + err := ValidateSensor(sObj, stanBus) assert.NotNil(t, err) - assert.Equal(t, true, strings.Contains(err.Error(), "more than once")) + assert.Equal(t, true, strings.Contains(err.Error(), "is referenced for more than one dependency")) + }) + + t.Run("test duplicate deps are fine for Jetstream", func(t *testing.T) { + sObj := sensorObj.DeepCopy() + sObj.Spec.Dependencies = append(sObj.Spec.Dependencies, v1alpha1.EventDependency{ + Name: "fake-dep2", + EventSourceName: "fake-source", + EventName: "fake-one", + }) + err := ValidateSensor(sObj, jetstreamBus) + assert.Nil(t, err) }) t.Run("test empty event source name", func(t *testing.T) { @@ -62,7 +88,7 @@ func TestValidDepencies(t *testing.T) { Name: "fake-dep2", EventName: "fake-one", }) - err := ValidateSensor(sObj) + err := ValidateSensor(sObj, jetstreamBus) assert.NotNil(t, err) assert.Equal(t, true, strings.Contains(err.Error(), "must define the EventSourceName")) }) @@ -73,7 +99,7 @@ func TestValidDepencies(t *testing.T) { Name: "fake-dep2", EventSourceName: "fake-source", }) - err := ValidateSensor(sObj) + err := ValidateSensor(sObj, jetstreamBus) assert.NotNil(t, err) assert.Equal(t, true, strings.Contains(err.Error(), "must define the EventName")) }) @@ -84,12 +110,341 @@ func TestValidDepencies(t *testing.T) { EventSourceName: "fake-source2", EventName: "fake-one2", }) - err := ValidateSensor(sObj) + err := ValidateSensor(sObj, jetstreamBus) assert.NotNil(t, err) assert.Equal(t, true, strings.Contains(err.Error(), "must define a name")) }) } +func TestValidateLogicalOperator(t *testing.T) { + t.Run("test valid", func(t *testing.T) { + logOp := v1alpha1.OrLogicalOperator + + err := validateLogicalOperator(logOp) + + assert.NoError(t, err) + }) + + t.Run("test not valid", func(t *testing.T) { + logOp := v1alpha1.LogicalOperator("fake") + + err := validateLogicalOperator(logOp) + + assert.Error(t, err) + }) +} + +func TestValidateComparator(t *testing.T) { + t.Run("test valid", func(t *testing.T) { + comp := v1alpha1.NotEqualTo + + err := validateComparator(comp) + + assert.NoError(t, err) + }) + + t.Run("test not valid", func(t *testing.T) { + comp := v1alpha1.Comparator("fake") + + err := validateComparator(comp) + + assert.Error(t, err) + }) +} + +func TestValidateEventFilter(t *testing.T) { + t.Run("test empty", func(t *testing.T) { + filter := &v1alpha1.EventDependencyFilter{} + + err := validateEventFilter(filter) + + assert.NoError(t, err) + }) + + t.Run("test valid, all", func(t *testing.T) { + filter := &v1alpha1.EventDependencyFilter{ + ExprLogicalOperator: v1alpha1.OrLogicalOperator, + Exprs: []v1alpha1.ExprFilter{ + { + Expr: 
"fake-expr", + Fields: []v1alpha1.PayloadField{ + { + Path: "fake-path", + Name: "fake-name", + }, + }, + }, + }, + DataLogicalOperator: v1alpha1.OrLogicalOperator, + Data: []v1alpha1.DataFilter{ + { + Path: "fake-path", + Type: "fake-type", + Value: []string{ + "fake-value", + }, + }, + }, + Context: &v1alpha1.EventContext{ + Type: "type", + Subject: "subject", + Source: "source", + DataContentType: "fake-content-type", + }, + Time: &v1alpha1.TimeFilter{ + Start: "00:00:00", + Stop: "06:00:00", + }, + } + + err := validateEventFilter(filter) + + assert.NoError(t, err) + }) + + t.Run("test valid, expr only", func(t *testing.T) { + filter := &v1alpha1.EventDependencyFilter{ + Exprs: []v1alpha1.ExprFilter{ + { + Expr: "fake-expr", + Fields: []v1alpha1.PayloadField{ + { + Path: "fake-path", + Name: "fake-name", + }, + }, + }, + }, + } + + err := validateEventFilter(filter) + + assert.NoError(t, err) + }) + + t.Run("test valid, data only", func(t *testing.T) { + filter := &v1alpha1.EventDependencyFilter{ + Data: []v1alpha1.DataFilter{ + { + Path: "fake-path", + Type: "fake-type", + Value: []string{ + "fake-value", + }, + }, + }, + } + + err := validateEventFilter(filter) + + assert.NoError(t, err) + }) + + t.Run("test valid, ctx only", func(t *testing.T) { + filter := &v1alpha1.EventDependencyFilter{ + Context: &v1alpha1.EventContext{ + Type: "type", + Subject: "subject", + Source: "source", + DataContentType: "fake-content-type", + }, + } + + err := validateEventFilter(filter) + + assert.NoError(t, err) + }) + + t.Run("test valid, time only", func(t *testing.T) { + filter := &v1alpha1.EventDependencyFilter{ + Time: &v1alpha1.TimeFilter{ + Start: "00:00:00", + Stop: "06:00:00", + }, + } + + err := validateEventFilter(filter) + + assert.NoError(t, err) + }) + + t.Run("test not valid, wrong logical operator", func(t *testing.T) { + filter := &v1alpha1.EventDependencyFilter{ + DataLogicalOperator: "fake", + } + + err := validateEventFilter(filter) + + assert.Error(t, err) + }) +} + +func TestValidateEventExprFilter(t *testing.T) { + t.Run("test valid", func(t *testing.T) { + exprFilter := &v1alpha1.ExprFilter{ + Expr: "fake-expr", + Fields: []v1alpha1.PayloadField{ + { + Path: "fake-path", + Name: "fake-name", + }, + }, + } + + err := validateEventExprFilter(exprFilter) + + assert.NoError(t, err) + }) + + t.Run("test not valid, no expr", func(t *testing.T) { + exprFilter := &v1alpha1.ExprFilter{ + Fields: []v1alpha1.PayloadField{ + { + Path: "fake-path", + Name: "fake-name", + }, + }, + } + + err := validateEventExprFilter(exprFilter) + + assert.Error(t, err) + }) + + t.Run("test not valid, no field name", func(t *testing.T) { + exprFilter := &v1alpha1.ExprFilter{ + Expr: "fake-expr", + Fields: []v1alpha1.PayloadField{ + { + Path: "fake-path", + }, + }, + } + + err := validateEventExprFilter(exprFilter) + + assert.Error(t, err) + }) +} + +func TestValidateEventDataFilter(t *testing.T) { + t.Run("test valid", func(t *testing.T) { + dataFilter := &v1alpha1.DataFilter{ + Path: "body.value", + Type: "number", + Value: []string{"50.0"}, + } + + err := validateEventDataFilter(dataFilter) + + assert.NoError(t, err) + }) + + t.Run("test not valid, no path", func(t *testing.T) { + dataFilter := &v1alpha1.DataFilter{ + Type: "number", + Value: []string{"50.0"}, + } + + err := validateEventDataFilter(dataFilter) + + assert.Error(t, err) + }) + + t.Run("test not valid, empty value", func(t *testing.T) { + dataFilter := &v1alpha1.DataFilter{ + Path: "body.value", + Type: "string", + Value: []string{""}, + } + 
+ err := validateEventDataFilter(dataFilter) + + assert.Error(t, err) + }) + + t.Run("test not valid, wrong comparator", func(t *testing.T) { + dataFilter := &v1alpha1.DataFilter{ + Comparator: "fake", + Path: "body.value", + Type: "string", + Value: []string{""}, + } + + err := validateEventDataFilter(dataFilter) + + assert.Error(t, err) + }) +} + +func TestValidateEventCtxFilter(t *testing.T) { + t.Run("test all fields", func(t *testing.T) { + ctxFilter := &v1alpha1.EventContext{ + Type: "fake-type", + Subject: "fake-subject", + Source: "fake-source", + DataContentType: "fake-content-type", + } + + err := validateEventCtxFilter(ctxFilter) + + assert.NoError(t, err) + }) + + t.Run("test single field", func(t *testing.T) { + ctxFilter := &v1alpha1.EventContext{ + Type: "fake-type", + } + + err := validateEventCtxFilter(ctxFilter) + + assert.NoError(t, err) + }) + + t.Run("test no fields", func(t *testing.T) { + ctxFilter := &v1alpha1.EventContext{} + + err := validateEventCtxFilter(ctxFilter) + + assert.Error(t, err) + }) +} + +func TestValidateEventTimeFilter(t *testing.T) { + t.Run("test start < stop", func(t *testing.T) { + timeFilter := &v1alpha1.TimeFilter{ + Start: "00:00:00", + Stop: "06:00:00", + } + + err := validateEventTimeFilter(timeFilter) + + assert.NoError(t, err) + }) + + t.Run("test stop < start", func(t *testing.T) { + timeFilter := &v1alpha1.TimeFilter{ + Start: "06:00:00", + Stop: "00:00:00", + } + + err := validateEventTimeFilter(timeFilter) + + assert.NoError(t, err) + }) + + t.Run("test start = stop", func(t *testing.T) { + timeFilter := &v1alpha1.TimeFilter{ + Start: "00:00:00", + Stop: "00:00:00", + } + + err := validateEventTimeFilter(timeFilter) + + assert.Error(t, err) + }) +} + func TestValidTriggers(t *testing.T) { t.Run("duplicate trigger names", func(t *testing.T) { triggers := []v1alpha1.Trigger{ @@ -97,11 +452,6 @@ func TestValidTriggers(t *testing.T) { Template: &v1alpha1.TriggerTemplate{ Name: "fake-trigger", K8s: &v1alpha1.StandardK8STrigger{ - GroupVersionResource: metav1.GroupVersionResource{ - Group: "k8s.io", - Version: "", - Resource: "pods", - }, Operation: "create", Source: &v1alpha1.ArtifactLocation{}, }, @@ -111,11 +461,6 @@ func TestValidTriggers(t *testing.T) { Template: &v1alpha1.TriggerTemplate{ Name: "fake-trigger", K8s: &v1alpha1.StandardK8STrigger{ - GroupVersionResource: metav1.GroupVersionResource{ - Group: "k8s.io", - Version: "", - Resource: "pods", - }, Operation: "create", Source: &v1alpha1.ArtifactLocation{}, }, @@ -126,4 +471,66 @@ func TestValidTriggers(t *testing.T) { assert.NotNil(t, err) assert.Equal(t, true, strings.Contains(err.Error(), "duplicate trigger name:")) }) + + t.Run("empty trigger template", func(t *testing.T) { + triggers := []v1alpha1.Trigger{ + { + Template: nil, + }, + } + err := validateTriggers(triggers) + assert.NotNil(t, err) + assert.Equal(t, true, strings.Contains(err.Error(), "trigger template can't be nil")) + }) + + t.Run("invalid conditions reset - cron", func(t *testing.T) { + triggers := []v1alpha1.Trigger{ + { + Template: &v1alpha1.TriggerTemplate{ + Name: "fake-trigger", + Conditions: "A && B", + ConditionsReset: []v1alpha1.ConditionsResetCriteria{ + { + ByTime: &v1alpha1.ConditionsResetByTime{ + Cron: "a * * * *", + }, + }, + }, + K8s: &v1alpha1.StandardK8STrigger{ + Operation: "create", + Source: &v1alpha1.ArtifactLocation{}, + }, + }, + }, + } + err := validateTriggers(triggers) + assert.NotNil(t, err) + assert.Equal(t, true, strings.Contains(err.Error(), "invalid cron expression")) + }) + + 
t.Run("invalid conditions reset - timezone", func(t *testing.T) { + triggers := []v1alpha1.Trigger{ + { + Template: &v1alpha1.TriggerTemplate{ + Name: "fake-trigger", + Conditions: "A && B", + ConditionsReset: []v1alpha1.ConditionsResetCriteria{ + { + ByTime: &v1alpha1.ConditionsResetByTime{ + Cron: "* * * * *", + Timezone: "fake", + }, + }, + }, + K8s: &v1alpha1.StandardK8STrigger{ + Operation: "create", + Source: &v1alpha1.ArtifactLocation{}, + }, + }, + }, + } + err := validateTriggers(triggers) + assert.NotNil(t, err) + assert.Equal(t, true, strings.Contains(err.Error(), "invalid timezone")) + }) } diff --git a/docs/CONTRIBUTING.md b/docs/CONTRIBUTING.md new file mode 100644 index 0000000000..72a154f80b --- /dev/null +++ b/docs/CONTRIBUTING.md @@ -0,0 +1,56 @@ +# Contributing + +## How To Provide Feedback + +Please [raise an issue in Github](https://github.com/argoproj/argo-events/issues). + +## Code of Conduct + +See [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). + +## Contributor Meetings + +A weekly opportunity for committers and maintainers of Workflows, Events, and Dataflow to discuss their current work and +talk about what’s next. Feel free to join us! For Contributor Meeting information, minutes and recordings +please [see here](https://bit.ly/argo-data-weekly). + +## How To Contribute + +We're always looking for contributors. + +- Documentation - something missing or unclear? Please submit a pull request! +- Code contribution - investigate + a [good first issue](https://github.com/argoproj/argo-events/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) + , or anything not assigned. +- Join the `#argo-contributors` channel on [our Slack](https://argoproj.github.io/community/join-slack). + +### Running Locally + +To run Argo Events locally for development: [developer guide](developer_guide.md). + +### Dependencies + +Dependencies increase the risk of security issues and have on-going maintenance costs. + +The dependency must pass these test: + +- A strong use case. +- It has an acceptable license (e.g. MIT). +- It is actively maintained. +- It has no security issues. + +Example, should we add `fasttemplate` +, [view the Snyk report](https://snyk.io/advisor/golang/github.com/valyala/fasttemplate): + +| Test | Outcome | +| --------------------------------------- | ------------------------------------ | +| A strong use case. | ❌ Fail. We can use `text/template`. | +| It has an acceptable license (e.g. MIT) | ✅ Pass. MIT license. | +| It is actively maintained. | ❌ Fail. Project is inactive. | +| It has no security issues. | ✅ Pass. No known security issues. | + +No, we should not add that dependency. + +### Contributor Workshop + +We have a [90m video on YouTube](https://youtu.be/zZv0lNCDG9w) show you have to get hands-on contributing. diff --git a/docs/FAQ.md b/docs/FAQ.md index 77f9ade5bb..5aaa044736 100644 --- a/docs/FAQ.md +++ b/docs/FAQ.md @@ -10,7 +10,7 @@ **Q. Can I deploy event-source and sensor in a namespace different than `argo-events`?** -**A**. Yes. If you want to deploy the event-source in a different namespace than `argo-events`, please update the event-source definition +**A**. Yes. If you want to deploy the event-source in a different namespace than `argo-events`, please update the event-source definition with the desired namespace and service account. Make sure to grant the service account the [necessary roles](https://github.com/argoproj/argo-events/blob/master/manifests/namespace-install.yaml). **Q. 
How do I debug Argo-Events?** @@ -29,7 +29,7 @@ Note: You can set the environment variable `DEBUG_LOG:true` in any of the contai **Q. The event-source pod is receiving events but nothing happens.** -**A**. +**A**. 1. Check the sensor resource is deployed and a pod is running for the resource. If the sensor pod is running, check for `Started to subscribe events for triggers` in the logs. @@ -39,7 +39,7 @@ If the sensor has subscribed to the event-bus but is unable to create the trigge **Q. Helm chart installation does not work.** -**A.** The Helm chart for argo events is maintained by the community and can be out of sync with latest release version. +**A.** The Helm chart for Argo Events is maintained by the community and can be out of sync with the latest release version. The official installation file is available [here](https://raw.githubusercontent.com/argoproj/argo-events/stable/manifests/install.yaml). If you notice the Helm chart is outdated, we encourage you to contribute to the [argo-helm](https://github.com/argoproj/argo-helm) repository on GitHub. @@ -64,5 +64,5 @@ set up the AWS SNS event-source. **Q. Where can I find the event structure for a particular event-source?** -**A.** Please refer to [this file](https://github.com/argoproj/argo-events/blob/master/pkg/apis/eventsource/v1alpha1/types.go) to understand the structure +**A.** Please refer to [this file](https://github.com/argoproj/argo-events/blob/master/pkg/apis/eventsource/v1alpha1/types.go) to understand the structure of different types of events dispatched by the event-source pod. diff --git a/docs/concepts/architecture.md b/docs/concepts/architecture.md index 378d8ece9a..9c1503518c 100644 --- a/docs/concepts/architecture.md +++ b/docs/concepts/architecture.md @@ -6,7 +6,6 @@
- Main components of Argo Events are: 1. [Event Source](https://argoproj.github.io/argo-events/concepts/event_source/) diff --git a/docs/concepts/event_source.md b/docs/concepts/event_source.md index a4c1592768..b32771ea41 100644 --- a/docs/concepts/event_source.md +++ b/docs/concepts/event_source.md @@ -1,6 +1,6 @@ # Event Source -An `EventSource` defines the configurations required to consume events from external sources like AWS SNS, SQS, GCP PubSub, Webhooks, etc. It further +An `EventSource` defines the configurations required to consume events from external sources like AWS SNS, SQS, GCP PubSub, Webhooks, etc. It further transforms the events into the [cloudevents](https://github.com/cloudevents/spec) and dispatches them over to the eventbus. Available event-sources: @@ -9,7 +9,10 @@ Available event-sources: 1. AWS SNS 1. AWS SQS 1. Azure Events Hub -1. Cron Schedules +1. Azure Queue Storage +1. Bitbucket +1. Bitbucket Server +1. Calendar 1. Emitter 1. File Based Events 1. GCP PubSub @@ -20,19 +23,20 @@ Available event-sources: 1. K8s Resources 1. Kafka 1. Minio -1. MQTT 1. NATS -1. Pulsar -1. Slack 1. NetApp StorageGrid -1. Webhooks -1. Stripe +1. MQTT 1. NSQ +1. Pulsar 1. Redis - +1. Slack +1. Stripe +1. Webhooks ## Specification + The complete specification is available [here](https://github.com/argoproj/argo-events/blob/master/api/event-source.md). ## Examples + Examples are located under [examples/event-sources](https://github.com/argoproj/argo-events/tree/master/examples/event-sources). diff --git a/docs/concepts/eventbus.md b/docs/concepts/eventbus.md index 4930ce7f48..9478f8068f 100644 --- a/docs/concepts/eventbus.md +++ b/docs/concepts/eventbus.md @@ -1,7 +1,7 @@ -# Eventbus +# EventBus -The eventbus acts as the transport layer of Argo-Events by connecting the event-sources and sensors. +The EventBus acts as the transport layer of Argo-Events by connecting the EventSources and Sensors. -Event-Sources publish the events while the sensors subscribe to the events to execute triggers. +EventSources publish the events while the Sensors subscribe to the events to execute triggers. -The current implementation of the eventbus is powered by NATS streaming. +There are three implementations of the EventBus: [NATS](https://docs.nats.io/legacy/stan/intro#:~:text=NATS%20Streaming%20is%20a%20data,with%20the%20core%20NATS%20platform.) (deprecated), [Jetstream](https://docs.nats.io/nats-concepts/jetstream), and [Kafka](https://kafka.apache.org). diff --git a/docs/concepts/sensor.md b/docs/concepts/sensor.md index 5f700a2c4b..8cfcfe59a2 100644 --- a/docs/concepts/sensor.md +++ b/docs/concepts/sensor.md @@ -1,12 +1,16 @@ # Sensor -Sensor defines a set of event dependencies (inputs) and triggers (outputs). -It listens to events on the eventbus and acts as an event dependency manager to resolve and execute the triggers. + +Sensor defines a set of event dependencies (inputs) and triggers (outputs). +It listens to events on the eventbus and acts as an event dependency manager to resolve and execute the triggers. ## Event dependency + A dependency is an event the sensor is waiting to happen. ## Specification + Complete specification is available [here](https://github.com/argoproj/argo-events/blob/master/api/sensor.md). ## Examples -Examples are located under [examples/sensors](https://github.com/argoproj/argo-events/tree/master/examples/sensors). \ No newline at end of file + +Examples are located under [examples/sensors](https://github.com/argoproj/argo-events/tree/master/examples/sensors). 
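To make the dependency-to-trigger flow described in these concept docs concrete, here is a minimal sketch of a Sensor manifest; the resource names, the referenced EventSource/event, and the target URL are illustrative assumptions rather than content taken from the examples directory:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Sensor
metadata:
  name: webhook-sensor             # illustrative name
spec:
  dependencies:
    - name: payload-dep            # the event this sensor waits for
      eventSourceName: webhook     # assumed EventSource name
      eventName: example           # assumed event name
  triggers:
    - template:
        name: http-trigger         # trigger template names must be unique
        http:
          url: http://example.com/hook   # placeholder endpoint
          method: POST
          payload:
            - src:
                dependencyName: payload-dep
                dataKey: body      # copy the event body...
              dest: body           # ...into the request payload
```

Per the validation rules earlier in this diff, every trigger template needs a unique, non-empty name, and each payload parameter must set `src.dependencyName` and `dest`.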
diff --git a/docs/concepts/trigger.md b/docs/concepts/trigger.md index 9ce3818bf7..43f4388e2b 100644 --- a/docs/concepts/trigger.md +++ b/docs/concepts/trigger.md @@ -10,7 +10,7 @@ dependencies are resolved. 1. Argo Rollouts 1. Argo Workflows 1. Custom - Build Your Own -1. HTTP Requests - Serverless Workloads (OpenFaas, Kubeless, KNative etc.) +1. HTTP Requests - Serverless Workloads (OpenFaaS, Kubeless, KNative etc.) 1. Kafka Messages 1. NATS Messages 1. Slack Notifications diff --git a/docs/developer_guide.md b/docs/developer_guide.md index 1a235c24d1..7880442e2d 100644 --- a/docs/developer_guide.md +++ b/docs/developer_guide.md @@ -8,29 +8,29 @@ another cluster you can ignore the Minikube specific step 3. ### Requirements -- Golang 1.15 +- Golang 1.20+ - Docker ### Installation & Setup -#### 1. Get the project. +#### 1. Get the project ``` git clone git@github.com:argoproj/argo-events cd argo-events ``` -#### 2. Start Minikube and point Docker Client to Minikube's Docker Daemon. +#### 2. Start Minikube and point Docker Client to Minikube's Docker Daemon ``` minikube start eval $(minikube docker-env) ``` -#### 3. Build the project. +#### 3. Build the project ``` -make all +make build ``` ### Changing Types If you're making a change to the `pkg/apis` package, please ensure you re-run the following command for code regeneration. ``` -$ make codegen +make codegen ``` diff --git a/docs/dr_ha_recommendations.md b/docs/dr_ha_recommendations.md index c8c87e6921..3a6f7683ad 100644 --- a/docs/dr_ha_recommendations.md +++ b/docs/dr_ha_recommendations.md @@ -79,7 +79,7 @@ spec: topologyKey: kubernetes.io/hostname ``` -To do AZ (Availablity Zone) anti-affinity, change the value of `topologyKey` +To do AZ (Availability Zone) anti-affinity, change the value of `topologyKey` from `kubernetes.io/hostname` to `topology.kubernetes.io/zone`. Besides `affinity`, @@ -98,6 +98,25 @@ could reduce the chance of PODs being evicted. Priority could be set through `spec.nats.native.priorityClassName` or `spec.nats.native.priority`. +### PDB + +The EventBus service is essential to EventSource and Sensor Pods, so it is advisable to have a `PodDisruptionBudget` to protect it from [Pod Disruptions](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/). The following PDB object sets `maxUnavailable` to 1, which is suitable for a 3-replica EventBus object. + +If your EventBus has a name other than `default`, change it accordingly in the YAML. + +```yaml +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: eventbus-default-pdb +spec: + maxUnavailable: 1 + selector: + matchLabels: + controller: eventbus-controller + eventbus-name: default +``` + ## EventSources ### Replicas diff --git a/docs/eventbus/antiaffinity.md b/docs/eventbus/antiaffinity.md new file mode 100644 index 0000000000..721a90ca95 --- /dev/null +++ b/docs/eventbus/antiaffinity.md @@ -0,0 +1,34 @@ +## Anti-affinity + +Kubernetes offers a concept of [anti-affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/), meaning that pods are scheduled on separate nodes. The anti-affinity can either be "best effort" or a hard requirement. + +A best effort and a hard requirement node anti-affinity config look like + the examples below. If you want to do AZ (Availability Zone) anti-affinity, change the value + of `topologyKey` from `kubernetes.io/hostname` to + `topology.kubernetes.io/zone`. 
+ +```yaml +# Best effort +affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: + controller: eventbus-controller + eventbus-name: default + topologyKey: kubernetes.io/hostname + weight: 100 +``` + +```yaml +# Hard requirement +affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + controller: eventbus-controller + eventbus-name: default + topologyKey: kubernetes.io/hostname +``` diff --git a/docs/eventbus/eventbus.md b/docs/eventbus/eventbus.md new file mode 100644 index 0000000000..50b7e41189 --- /dev/null +++ b/docs/eventbus/eventbus.md @@ -0,0 +1,23 @@ +# EventBus + +![GA](../assets/ga.svg) + +> v0.17.0 and after + +EventBus is a Kubernetes +[Custom Resource](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) +which is used for event transmission from EventSources to Sensors. Currently, +EventBus can be backed by [NATS](https://docs.nats.io/), either its NATS +Streaming service or its newer Jetstream service, or by Kafka. In the future, +this can be expanded to support other technologies as well. + +EventBus is namespaced; an EventBus object is required in a namespace to make +EventSource and Sensor work. + +The common practice is to create an EventBus named `default` in the namespace. If +you want to use a different name, or you want to have multiple EventBus in one +namespace, you need to specify `eventBusName` in the spec of EventSource and +Sensor correspondingly, so that they can find the right one. See EventSource +[spec](https://github.com/argoproj/argo-events/tree/stable/api/event-source.md#eventsourcespec) +and Sensor +[spec](https://github.com/argoproj/argo-events/tree/stable/api/sensor.md#sensorspec). diff --git a/docs/eventbus/jetstream.md b/docs/eventbus/jetstream.md new file mode 100644 index 0000000000..2ed190143c --- /dev/null +++ b/docs/eventbus/jetstream.md @@ -0,0 +1,84 @@ +## Jetstream + +[Jetstream](https://docs.nats.io/nats-concepts/jetstream) is the latest streaming server implemented by the NATS community, with improvements from the original NATS Streaming (which will eventually be deprecated). + +The simplest Jetstream EventBus example: + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: EventBus +metadata: + name: default +spec: + jetstream: + version: latest # Do NOT use "latest" but a specific version in your real deployment +``` + +The example above brings up a Jetstream +[StatefulSet](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) +with 3 replicas in the namespace. + +## Properties + +Check +[here](https://github.com/argoproj/argo-events/blob/master/api/event-bus.md#argoproj.io/v1alpha1.JetstreamBus) +for the full spec of `jetstream`. + +### version + +The version number specified in the example above is the release number for the NATS server. We support a subset of these versions that we have tried out, and plan to upgrade them only as needed. 
To see what that includes: + +``` +kubectl get configmap argo-events-controller-config -o yaml +``` + +### A more involved example + +Another example with more configuration: + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: EventBus +metadata: + name: default +spec: + jetstream: + version: latest # Do NOT use "latest" but a specific version in your real deployment + replicas: 5 + persistence: # optional + storageClassName: standard + accessMode: ReadWriteOnce + volumeSize: 10Gi + streamConfig: | # see default values in argo-events-controller-config + maxAge: 24h + settings: | + max_file_store: 1GB # see default values in argo-events-controller-config + startArgs: + - "-D" # debug-level logs +``` + +## Security + +For Jetstream, TLS is turned on for all client-server communication as well as between Jetstream nodes. In addition, for client-server communication we by default use password authentication (and because TLS is turned on, the password is encrypted). + +## How it works under the hood + +Jetstream has the concept of a Stream, and Subjects (i.e. topics) which are used on a Stream. From the documentation: “Each Stream defines how messages are stored and what the limits (duration, size, interest) of the retention are.” For Argo Events, we have one Stream called "default" with a single set of settings, but we have multiple subjects, each of which is named `default.<eventSourceName>.<eventName>`. Sensors subscribe to the subjects they need using durable consumers. + +### Exotic + +To use an existing JetStream service, follow the example below. + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: EventBus +metadata: + name: default +spec: + jetstreamExotic: + url: nats://xxxxx:xxx + accessSecret: + name: my-secret-name + key: secret-key + streamConfig: "" +``` diff --git a/docs/eventbus/kafka.md b/docs/eventbus/kafka.md new file mode 100644 index 0000000000..b3482c8816 --- /dev/null +++ b/docs/eventbus/kafka.md @@ -0,0 +1,117 @@ +Kafka is a widely used event streaming platform. We recommend using Kafka if +you have a lot of events and want to horizontally scale your Sensors. If you +are looking to get started quickly with Argo Events we recommend using +Jetstream instead. + +When using a Kafka EventBus you must already have a Kafka cluster set up and +topics created (unless you have auto create enabled, see [topics](#topics) +below). + +## Example +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: EventBus +metadata: + name: default +spec: + kafka: + url: kafka:9092 # must be managed independently + topic: "example" # optional +``` + +See [here](https://github.com/argoproj/argo-events/blob/master/api/event-bus.md#kafkabus) +for the full specification. + +## Properties +### url +Comma-separated list of Kafka broker URLs; the Kafka brokers must be managed +independently of Argo Events. + +### topic +The topic name, defaults to `{namespace-name}-{eventbus-name}`. Two additional +topics per Sensor are also required, see [topics](#topics) below for more +information. + +### version +Kafka version; we recommend not setting this field manually in most +circumstances. Defaults to the oldest supported stable version. + +### tls +Enables TLS on the kafka connection. +```yaml +tls: + caCertSecret: + name: my-secret + key: ca-cert-key + clientCertSecret: + name: my-secret + key: client-cert-key + clientKeySecret: + name: my-secret + key: client-key-key +``` + +### sasl +Enables SASL authentication on the kafka connection. 
+```yaml +sasl: + mechanism: PLAIN + passwordSecret: + key: password + name: my-user + userSecret: + key: user + name: my-user +``` + +### consumerGroup.groupName +Consumer group name, defaults to `{namespace-name}-{sensor-name}`. + +### consumerGroup.rebalanceStrategy +The Kafka rebalance strategy; can be one of: sticky, roundrobin, range. +Defaults to range. + +### consumerGroup.startOldest +Whether a newly created consumer group starts from the oldest event +(true) or the newest event (false). Defaults to false. + +## Security +You can enable TLS or SASL authentication, see above for configuration +details. You must enable these features in your Kafka cluster and make +the certificates/credentials available in a Kubernetes secret. + +## Topics +The Kafka EventBus requires one event topic and two additional topics (trigger +and action) per Sensor. These topics will not be created automatically unless +the Kafka `auto.create.topics.enable` cluster configuration is set to true, +otherwise it is your responsibility to create these topics. If a topic does +not exist and cannot be automatically created, the EventSource and/or Sensor +will exit with an error. + +If you want to take advantage of the horizontal scaling enabled by the Kafka +EventBus be sure to create topics with more than one partition. + +By default the topics are named as follows. + +| topic | name | +| ----- | ---- | +| event | `{namespace}-{eventbus-name}` | +| trigger | `{namespace}-{eventbus-name}-{sensor-name}-trigger` | +| action | `{namespace}-{eventbus-name}-{sensor-name}-action` | + +If a topic name is specified in the EventBus specification, then the topics are +named as follows. + +| topic | name | +| ----- | ---- | +| event | `{spec.kafka.topic}` | +| trigger | `{spec.kafka.topic}-{sensor-name}-trigger` | +| action | `{spec.kafka.topic}-{sensor-name}-action` | + +## Horizontal Scaling and Leader Election + +Sensors that use a Kafka EventBus can scale horizontally. Specifying replicas +greater than one will result in all Sensor pods actively processing events. +However, an EventSource that uses a Kafka EventBus cannot necessarily be +horizontally scaled in an active-active manner, see [EventSource HA](../eventsources/ha.md) +for more details. In an active-passive scenario a [Kubernetes leader election](../eventsources/ha.md#kubernetes-leader-election) +is used. diff --git a/docs/eventbus.md b/docs/eventbus/stan.md similarity index 67% rename from docs/eventbus.md rename to docs/eventbus/stan.md index bca374d75e..5e6e234438 100644 --- a/docs/eventbus.md +++ b/docs/eventbus/stan.md @@ -1,26 +1,3 @@ -# EventBus - -![GA](assets/ga.svg) - -> v0.17.0 and after - -EventBus is a Kubernetes -[Custom Resource](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) -which is used for events transmission from EventSources to Sensors. Currently -EventBus is backed by -[NATS Streaming](https://github.com/nats-io/nats-streaming-server), and it is -open to support other technologies. - -EventBus is namespaced, an EventBus object is required in a namespace to make -EventSource and Sensor work. - -The common pratice is to create an EventBus named `default` in the namespace. If -you want to use a different name, or you want to have multiple EventBus in one -namespace, you need to specifiy `eventBusName` in the spec of EventSource and -Sensor correspondingly, so that they can find the right one. 
See EventSource -[spec](https://github.com/argoproj/argo-events/tree/stable/api/event-source.md#eventsourcespec) -and Sensor -[spec](https://github.com/argoproj/argo-events/tree/stable/api/sensor.md#sensorspec). ## NATS Streaming @@ -105,6 +82,14 @@ for the full spec of `native`. - `maxAge` - Max Age of existing messages, i.e. `72h`, `4h35m`, defaults to `72h`. +- `maxMsgs` - Max number of messages before expiring the oldest messages, 0 means unlimited. Defaults to 1000000. + +- `maxBytes` - Total size of messages before expiring the oldest messages, 0 means unlimited. Defaults to 1GB. + +- `maxSubs` - Maximum number of subscriptions, 0 means unlimited. Defaults to 1000. + +- `maxPayload` - Maximum number of bytes in a message payload, 0 means unlimited. Defaults to 1MB. + - `imagePullSecrets` - Secrets used to pull images. - `serviceAccountName` - In case your firm requires to use a service account @@ -122,37 +107,6 @@ for the full spec of `native`. [Affinity](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/) settings for the StatefulSet PODs. - A best effort and a hard requirement node anti-affinity config look like - below, if you want to do AZ (Availablity Zone) anti-affinity, change the value - of `topologyKey` from `kubernetes.io/hostname` to - `topology.kubernetes.io/zone`. - -```yaml -# Best effort -affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchLabels: - controller: eventbus-controller - eventbus-name: default - topologyKey: kubernetes.io/hostname - weight: 100 -``` - -```yaml -# Hard requirement -affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - controller: eventbus-controller - eventbus-name: default - topologyKey: kubernetes.io/hostname -``` - #### More About Native NATS EventBus - Messages limit per channel defaults to 1,000,000. It could be customized by @@ -162,10 +116,11 @@ affinity: `spec.nats.native.maxBytes` to customize it, `"0"` means unlimited. - Max age of messages is 72 hours, which means messages over 72 hours will be - deleted automatically. It can be cutomized by setting + deleted automatically. It can be customized by setting `spec.nats.native.maxAge`, i.e. `240h`. -- Max subscription number is 1000. +- Max subscription number defaults to `1000`; it can be customized by + setting `spec.nats.native.maxSubs`. ### Exotic @@ -211,5 +166,5 @@ A sample result: } ``` -- All the events in a namespace are published to same channel/subject/topic +All the events in a namespace are published to the same channel/subject/topic named `eventbus-{namespace}` in the EventBus. diff --git a/docs/eventsources/filtering.md b/docs/eventsources/filtering.md new file mode 100644 index 0000000000..770760d3c1 --- /dev/null +++ b/docs/eventsources/filtering.md @@ -0,0 +1,61 @@ +# Filtering EventSources + +When event sources watch events from external data sources (i.e. Kafka topics), they will ingest all messages. +With filtering, we are able to apply constraints and determine if the event should be published or skipped. +This is achieved by evaluating an expression in the EventSource spec. 
+
+## Fields
+
+A `filter` in an example Kafka EventSource:
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: EventSource
+metadata:
+  name: kafka
+spec:
+  kafka:
+    example:
+      url: kafka.argo-events:9092
+      topic: topic-2
+      jsonBody: true
+      partition: "1"
+      filter: # filter field
+        expression: "(body.id == 4) && (body.name != 'Joe')" # expression to be evaluated
+      connectionBackoff:
+        duration: 10s
+        steps: 5
+        factor: 2
+        jitter: 0.2
+```
+
+The `expression` string is evaluated with the [expr](https://github.com/antonmedv/expr) package, which offers a wide set of basic operators and comparators.
+
+## Example
+
+1. Create a Kafka EventSource with the filter field present:
+
+```
+kubectl apply -f examples/event-sources/kafka.yaml -n argo-events
+```
+
+2. Send an event that passes the filter conditions to Kafka:
+
+```
+echo '{"id": 4,"name": "John", "email": "john@intuit.com", "department":{"id": 1,"name": "HR","bu":{"id": 2,"name" : "devp"}}}' | kcat -b localhost:9092 -P -t topic-2
+```
+
+3. Send an event that fails the filter conditions:
+
+```
+echo '{"id": 2,"name": "Johnson", "email": "john@intuit.com", "department":{"id": 1,"name": "HR","bu":{"id": 2,"name" : "devp"}}}' | kcat -b localhost:9092 -P -t topic-2
+```
+
+## Output
+
+Successful logs from the Kafka event-source pod:
+
+```
+{"level":"info","ts":1644017495.0711913,"logger":"argo-events.eventsource","caller":"kafka/start.go:217","msg":"dispatching event on the data channel...","eventSourceName":"kafka","eventSourceType":"kafka","eventName":"example","partition-id":"0"}
+{"level":"info","ts":1644017495.1374986,"logger":"argo-events.eventsource","caller":"eventsources/eventing.go:514","msg":"succeeded to publish an event","eventSourceName":"kafka","eventName":"example","eventSourceType":"kafka","eventID":"kafka:example:kafka-broker:9092:topic-2:0:7"}
+```
diff --git a/docs/eventsources/gcp-pubsub.md b/docs/eventsources/gcp-pubsub.md
index c60504552b..b5292492b3 100644
--- a/docs/eventsources/gcp-pubsub.md
+++ b/docs/eventsources/gcp-pubsub.md
@@ -8,7 +8,7 @@ combination.
 
 | Topic Provided/Existing | Sub ID Provided/Existing | Actions |
 | ----------------------- | ------------------------ | --------------------------------------------------------------------- |
-| Yes/Yes | Yes/Yes | Validate if given topic matches subsciption's topic |
+| Yes/Yes | Yes/Yes | Validate if given topic matches subscription's topic |
 | Yes/Yes | Yes/No | Create a subscription with given ID |
 | Yes/Yes | No/- | Create or re-use subscription with auto generated subID |
 | Yes/No | Yes/No | Create a topic and a subscription with given subID |
@@ -27,3 +27,13 @@ Full spec is available [here](https://github.com/argoproj/argo-events/tree/stabl
 
 See a PubSub EventSource
 [example](https://github.com/argoproj/argo-events/tree/stable/examples/event-sources/gcp-pubsub.yaml).
+
+## Running With PubSub Emulator
+
+You can point this event source at the
+[PubSub Emulator](https://cloud.google.com/pubsub/docs/emulator) by
+configuring the `PUBSUB_EMULATOR_HOST` environment variable for the event
+source pod. This can be configured on the `EventSource` resource under the
+`spec.template.container.env` key. This option is also documented in the
+PubSub EventSource
+[example](https://github.com/argoproj/argo-events/tree/stable/examples/event-sources/gcp-pubsub.yaml).
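+
+For example, a minimal sketch of wiring the variable into the spec (the
+emulator host `pubsub-emulator:8085` and the project/topic names below are
+placeholders; adjust them to your environment):
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: EventSource
+metadata:
+  name: gcp-pubsub
+spec:
+  template:
+    container:
+      env:
+        # point the PubSub client at the emulator instead of GCP
+        - name: PUBSUB_EMULATOR_HOST
+          value: pubsub-emulator:8085
+  pubSub:
+    example:
+      # hypothetical project/topic for illustration
+      projectID: test-project
+      topic: example-topic
+      jsonBody: true
+```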
diff --git a/docs/eventsources/ha.md b/docs/eventsources/ha.md
index 64c2a12542..7c5609f9fc 100644
--- a/docs/eventsources/ha.md
+++ b/docs/eventsources/ha.md
@@ -18,15 +18,16 @@ behaviors!**
 
 - AWS SNS
 - AWS SQS
-- Github
-- Gitlab
+- Bitbucket
+- Bitbucket Server
+- GitHub
+- GitLab
 - NetApp Storage GRID
-- Resource
 - Slack
 - Stripe
 - Webhook
 
-When `spec.replicas` is set to N (N > 1), all the N Pods serve trafic.
+When `spec.replicas` is set to N (N > 1), all the N Pods serve traffic.
 
 ## Active-Passive
 
@@ -37,19 +38,44 @@ old one is gone.
 
 - AMQP
 - Azure Events Hub
-- Kafka
+- Calendar
+- Emitter
 - GCP PubSub
+- Generic
 - File
 - HDFS
-- NATS
+- Kafka
 - Minio
 - MQTT
-- Emitter
+- NATS
 - NSQ
 - Pulsar
 - Redis
-- Calendar
-- Generic
+- Resource
+
+## Kubernetes Leader Election
+
+By default, Argo Events will use NATS for the HA leader election except when
+using a Kafka EventBus, in which case a Kubernetes leader election is used.
+If using a different EventBus, you can opt in to a Kubernetes native leader
+election by specifying the following annotation.
+```yaml
+annotations:
+  events.argoproj.io/leader-election: k8s
+```
+
+To use Kubernetes leader election, the following RBAC rules need to be associated
+with the EventSource ServiceAccount.
+```yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: argo-events-leaderelection-role
+rules:
+- apiGroups: ["coordination.k8s.io"]
+  resources: ["leases"]
+  verbs: ["get", "create", "update"]
+```
 
 ## More
diff --git a/docs/eventsources/multiple-events.md b/docs/eventsources/multiple-events.md
index 046c6c9657..bcc0edfd7b 100644
--- a/docs/eventsources/multiple-events.md
+++ b/docs/eventsources/multiple-events.md
@@ -29,10 +29,7 @@ spec:
 ```
 
 For the example above, there are 2 events configured in the EventSource named
-`webhook`. Please use different `port` numbers for different events, this is the
-limitation for multiple events configured in a `webhook` EventSource, this
-limitation also applies to `webhook` extended event source types such as
-`github`, `sns`.
+`webhook`.
 
 ## Mixed EventSource Types
diff --git a/docs/eventsources/naming.md b/docs/eventsources/naming.md
index 8edf1d5693..454f378bf2 100644
--- a/docs/eventsources/naming.md
+++ b/docs/eventsources/naming.md
@@ -38,4 +38,4 @@ spec:
 ## EventName
 
 `eventName` is the map key of a configured event. In the example above,
-`eventName` could be `exmaple` or `example-foo`.
+`eventName` could be `example` or `example-foo`.
diff --git a/docs/eventsources/services.md b/docs/eventsources/services.md
index 248e53430f..771cfd2032 100644
--- a/docs/eventsources/services.md
+++ b/docs/eventsources/services.md
@@ -29,5 +29,25 @@ expose the endpoint for external access, please manage it by using native K8s
 objects (i.e. a Load Balancer type Service, or an Ingress), and remove
 `service` field from the EventSource object.
 
+For example, you can create a K8s service with the selector `eventsource-name: webhook`
+to select pods created for the "webhook" event source, like the following:
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: webhook-eventsource
+spec:
+  ports:
+    - port: 12000
+      protocol: TCP
+      targetPort: 12000
+  selector:
+    eventsource-name: webhook
+  type: NodePort
+```
+
+Then you can expose the service for external access using native K8s objects as mentioned above (an Ingress sketch follows below).
+
 You can refer to [webhook health check](webhook-health-check.md) if you need a
 health check endpoint for LB Service or Ingress configuration.
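+
+A minimal Ingress sketch for the same "webhook" event source could look like
+the following (the hostname and path are placeholders; adjust them, and add
+TLS or an ingress class as your cluster requires):
+
+```yaml
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: webhook-eventsource
+spec:
+  rules:
+    - host: webhook.example.com
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                # the Service created above
+                name: webhook-eventsource
+                port:
+                  number: 12000
+```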
diff --git a/docs/eventsources/setup/amqp.md b/docs/eventsources/setup/amqp.md
index ef0cd8a13a..538ec96ad1 100644
--- a/docs/eventsources/setup/amqp.md
+++ b/docs/eventsources/setup/amqp.md
@@ -1,6 +1,6 @@
 # AMQP
 
-AMQP event-source listens to messages on the MQ and helps sensor trigger the workloads. 
+AMQP event-source listens to messages on the MQ and helps sensor trigger the workloads.
 
 ## Event Structure
 
@@ -17,20 +17,20 @@ The structure of an event dispatched by the event-source over the eventbus looks
         "subject": "name_of_the_configuration_within_event_source"
     },
     "data": {
-      "contentType": "ContentType is the MIME content type",
-      "contentEncoding": "ContentEncoding is the MIME content encoding",
-      "deliveryMode": "Delivery mode can be either - non-persistent (1) or persistent (2)",
-      "priority": "Priority refers to the use - 0 to 9",
-      "correlationId": "CorrelationId is the correlation identifier",
-      "replyTo": "ReplyTo is the address to reply to (ex: RPC)",
-      "expiration": "Expiration refers to message expiration spec",
-      "messageId": "MessageId is message identifier",
-      "timestamp": "Timestamp refers to the message timestamp",
-      "type": "Type refers to the message type name",
-      "appId": "AppId refers to the application id",
-      "exchange": "Exchange is basic.publish exchange",
-      "routingKey": "RoutingKey is basic.publish routing key",
-      "body": "Body represents the messsage body",
+      "contentType": "ContentType is the MIME content type",
+      "contentEncoding": "ContentEncoding is the MIME content encoding",
+      "deliveryMode": "Delivery mode can be either - non-persistent (1) or persistent (2)",
+      "priority": "Priority refers to the use - 0 to 9",
+      "correlationId": "CorrelationId is the correlation identifier",
+      "replyTo": "ReplyTo is the address to reply to (ex: RPC)",
+      "expiration": "Expiration refers to message expiration spec",
+      "messageId": "MessageId is message identifier",
+      "timestamp": "Timestamp refers to the message timestamp",
+      "type": "Type refers to the message type name",
+      "appId": "AppId refers to the application id",
+      "exchange": "Exchange is basic.publish exchange",
+      "routingKey": "RoutingKey is basic.publish routing key",
+      "body": "Body represents the message body",
     }
 }
 
@@ -92,11 +92,11 @@ The structure of an event dispatched by the event-source over the eventbus looks
 
        kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/amqp.yaml
 
-8. Lets set up a rabbitmq publisher. If you don't have `pika` installed, run.
+8. Let's set up a RabbitMQ publisher. If you don't have `pika` installed, run.
 
        python -m pip install pika --upgrade
 
-9. Open a python REPL and run following code to publish a message on `exhange` called `test`.
+9. Open a Python REPL and run the following code to publish a message on the exchange called `test`.
 
        import pika
        connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
       channel = connection.channel()
       channel.basic_publish(exchange='test',
@@ -105,7 +105,8 @@ The structure of an event dispatched by the event-source over the eventbus looks
                             routing_key='hello',
                             body='{"message": "hello"}')
 
-10. As soon as you publish a message, sensor will trigger an Argo workflow. Run `argo list` to find the workflow. 
-
+10. As soon as you publish a message, sensor will trigger an Argo workflow. Run `argo list` to find the workflow.
+
 ## Troubleshoot
+
 Please read the [FAQ](https://argoproj.github.io/argo-events/FAQ/).
diff --git a/docs/eventsources/setup/aws-sns.md b/docs/eventsources/setup/aws-sns.md
index 8da58c3267..cfcf20c22f 100644
--- a/docs/eventsources/setup/aws-sns.md
+++ b/docs/eventsources/setup/aws-sns.md
@@ -3,6 +3,7 @@
 SNS event-source subscribes to AWS SNS topics, listens events and helps sensor trigger the workloads.
 
 ## Event Structure
+
 The structure of an event dispatched by the event-source over eventbus looks like following,
 
     {
@@ -16,8 +17,8 @@ The structure of an event dispatched by the event-source over eventbus looks lik
         "subject": "name_of_the_configuration_within_event_source"
     },
     "data": {
-      "header": "sns headers",
-      "body": "body refers to the sns notification data",
+      "header": "sns headers",
+      "body": "body refers to the sns notification data",
     }
 }
 
@@ -44,11 +45,11 @@ The structure of an event dispatched by the event-source over eventbus looks lik
 
 1. The event-source for AWS SNS creates a pod and exposes it via service.
    The name for the service is in `<event-source-name>-eventsource-svc` format.
-   You will need to create an Ingress or Openshift Route for the event-source service so that it can be reached from AWS.
+   You will need to create an Ingress or OpenShift Route for the event-source service so that it can be reached from AWS.
    You can find more information on Ingress or Route online.
 
 1. Create the event source by running the following command. Make sure to update the URL in the configuration within the event-source.
-    
+
        kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/aws-sns.yaml
 
 1. Go to SNS settings on AWS and verify the webhook is registered. You can also check it by inspecting the event-source pod logs.
@@ -59,7 +60,8 @@ The structure of an event dispatched by the event-source over eventbus looks lik
 
 1. Publish a message to the SNS topic, and it will trigger an argo workflow.
 
-1. Run `argo list` to find the workflow.
+1. Run `argo list` to find the workflow.
 
 ## Troubleshoot
+
 Please read the [FAQ](https://argoproj.github.io/argo-events/FAQ/).
diff --git a/docs/eventsources/setup/aws-sqs.md b/docs/eventsources/setup/aws-sqs.md
index bf015af130..ee1eb47955 100644
--- a/docs/eventsources/setup/aws-sqs.md
+++ b/docs/eventsources/setup/aws-sqs.md
@@ -17,13 +17,13 @@ The structure of an event dispatched by the event-source over the eventbus looks
         "subject": "name_of_the_configuration_within_event_source"
     },
     "data": {
-      "messageId": "message id",
-      // Each message attribute consists of a Name, Type, and Value. For more information,
-      // see Amazon SQS Message Attributes
-      // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html)
-      // in the Amazon Simple Queue Service Developer Guide.
-      "messageAttributes": "message attributes",
-      "body": "Body is the message data",
+      "messageId": "message id",
+      // Each message attribute consists of a Name, Type, and Value. For more information,
+      // see Amazon SQS Message Attributes
+      // (https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-message-attributes.html)
+      // in the Amazon Simple Queue Service Developer Guide.
+      "messageAttributes": "message attributes",
+      "body": "Body is the message data",
     }
 }
 
@@ -64,7 +64,8 @@ The structure of an event dispatched by the event-source over the eventbus looks
 
        aws sqs send-message --queue-url https://sqs.us-east-1.amazonaws.com/XXXXX/test --message-body '{"message": "hello"}'
 
-1. Once a message is published, an argo workflow will be triggered. Run `argo list` to find the workflow.
+1. Once a message is published, an argo workflow will be triggered. Run `argo list` to find the workflow.
 
 ## Troubleshoot
+
 Please read the [FAQ](https://argoproj.github.io/argo-events/FAQ/).
diff --git a/docs/eventsources/setup/azure-queue-storage.md b/docs/eventsources/setup/azure-queue-storage.md
new file mode 100644
index 0000000000..022a83a5b1
--- /dev/null
+++ b/docs/eventsources/setup/azure-queue-storage.md
@@ -0,0 +1,64 @@
+# Azure Queue Storage
+
+Azure Queue Storage event-source allows you to consume messages from Azure Storage queues.
+
+## Event Structure
+
+The structure of an event dispatched by the event-source over the eventbus looks like following,
+
+    {
+        "context": {
+          "id": "unique_event_id",
+          "source": "name_of_the_event_source",
+          "specversion": "cloud_events_version",
+          "type": "type_of_event_source",
+          "datacontenttype": "type_of_data",
+          "subject": "name_of_the_configuration_within_event_source",
+          "time": "event_time",
+        },
+        "data": {
+          "messageID": "MessageID is the ID of the message",
+          "body": "Body represents the message body",
+          "insertionTime": "InsertionTime is the time the message was inserted into the queue",
+        }
+    }
+
+## Setup
+
+1. Create a queue called `test` either using the az CLI or the Azure Storage management console.
+
+1. Fetch your connection string for Azure Queue Storage and base64 encode it.
+
+1. Create a secret called `azure-secret` as follows.
+
+        apiVersion: v1
+        kind: Secret
+        metadata:
+          name: azure-secret
+        type: Opaque
+        data:
+          connectionstring: <base64-connection-string>
+
+1. Deploy the secret.
+
+        kubectl -n argo-events apply -f azure-secret.yaml
+
+1. Create the event source by running the following command.
+
+        kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/azure-queue-storage.yaml
+
+1. Inspect the event-source pod logs to make sure it was able to listen to the queue specified in the event source to consume messages.
+
+1. Create a sensor by running the following command.
+
+        kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/azure-queue-storage.yaml
+
+1. Dispatch a message to the queue.
+
+        az storage message put -q test --content '{"message": "hello"}' --account-name mystorageaccount --connection-string "<connection-string>"
+
+1. Once a message is published, an argo workflow will be triggered. Run `argo list` to find the workflow.
+
+## Troubleshoot
+
+Please read the [FAQ](https://argoproj.github.io/argo-events/FAQ/).
\ No newline at end of file
diff --git a/docs/eventsources/setup/azure-service-bus.md b/docs/eventsources/setup/azure-service-bus.md
new file mode 100644
index 0000000000..8200d8bd5f
--- /dev/null
+++ b/docs/eventsources/setup/azure-service-bus.md
@@ -0,0 +1,80 @@
+# Azure Service Bus
+
+Service Bus event-source allows you to consume messages from queues and topics in Azure Service Bus and helps sensor trigger workflows.
+
+## Event Structure
+
+The structure of an event dispatched by the event-source over the eventbus looks like following,
+
+    {
+        "context": {
+          "id": "unique_event_id",
+          "source": "name_of_the_event_source",
+          "specversion": "cloud_events_version",
+          "type": "type_of_event_source",
+          "datacontenttype": "type_of_data",
+          "subject": "name_of_the_configuration_within_event_source",
+          "time": "event_time",
+        },
+        "data": {
+          "applicationProperties": "ApplicationProperties can be used to store custom metadata for a message",
+          "body": "Body represents the message body",
+          "contentType": "ContentType is the MIME content type",
+          "correlationID": "CorrelationID is the correlation identifier",
+          "enqueuedTime": "EnqueuedTime is the time when the message was enqueued",
+          "messageID": "ID of the message",
+          "replyTo": "ReplyTo is an application-defined value used to specify a reply path to the receiver of the message",
+          "sequenceNumber": "SequenceNumber is a unique number assigned to a message by Service Bus",
+          "subject": "Subject enables an application to indicate the purpose of the message, similar to an email subject line",
+        }
+    }
+
+## Setup
+
+1. Create a queue called `test` either using the Azure CLI or the Azure Service Bus management console.
+
+1. Fetch your connection string for Azure Service Bus and base64 encode it.
+
+1. Create a secret called `azure-secret` as follows.
+
+        apiVersion: v1
+        kind: Secret
+        metadata:
+          name: azure-secret
+        type: Opaque
+        data:
+          connectionstring: <base64-connection-string>
+
+1. Deploy the secret.
+
+        kubectl -n argo-events apply -f azure-secret.yaml
+
+1. Create the event source by running the following command.
+
+        kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/azure-service-bus.yaml
+
+1. Inspect the event-source pod logs to make sure it was able to listen to the queue specified in the event source to consume messages.
+
+1. Create a sensor by running the following command.
+
+        kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/azure-service-bus.yaml
+
+1. Let's set up a Service Bus client. If you don't have `azure-servicebus` installed, run.
+
+        python -m pip install azure-servicebus --upgrade
+
+1. Open a Python REPL and run the following code to send a message on the queue called `test`.
+
+    Before running the code, make sure you have the `SERVICE_BUS_CONNECTION_STRING` environment variable set.
+    This is the connection string for your Azure Service Bus.
+
+        import os, json
+        from azure.servicebus import ServiceBusClient, ServiceBusMessage
+        servicebus_client = ServiceBusClient.from_connection_string(conn_str=os.environ['SERVICE_BUS_CONNECTION_STRING'])
+        with servicebus_client:
+            sender = servicebus_client.get_queue_sender(queue_name="test")
+            with sender:
+                message = ServiceBusMessage('{"hello": "world"}')
+                sender.send_messages(message)
+
+1. As soon as you publish a message, sensor will trigger an Argo workflow. Run `argo list` to find the workflow.
diff --git a/docs/eventsources/setup/bitbucket.md b/docs/eventsources/setup/bitbucket.md
new file mode 100644
index 0000000000..9d2c7eb1b4
--- /dev/null
+++ b/docs/eventsources/setup/bitbucket.md
@@ -0,0 +1,78 @@
+# Bitbucket (Cloud)
+
+Bitbucket event-source programmatically configures webhooks for projects on Bitbucket and helps sensor trigger the workloads on events.
+ +## Event Structure + +The structure of an event dispatched by the event-source over the eventbus looks like following, + + { + "context": { + "type": "type_of_event_source", + "specversion": "cloud_events_version", + "source": "name_of_the_event_source", + "id": "unique_event_id", + "time": "event_time", + "datacontenttype": "type_of_data", + "subject": "name_of_the_configuration_within_event_source" + }, + "data": { + "body": "Body is the Bitbucket event payload", + "headers": "Headers from the Bitbucket event", + } + } + +## Specification + +Bitbucket event-source specification is available [here](https://github.com/argoproj/argo-events/blob/master/api/event-source.md#bitbucketeventsource).
+Example event-source yaml file is [here](https://github.com/argoproj/argo-events/blob/master/examples/event-sources/bitbucket.yaml).
+
+## Setup
+
+> **_NOTE:_** In this setup, we will use the basic auth strategy together with [App password](https://support.atlassian.com/bitbucket-cloud/docs/app-passwords/) (there is also support for [OAuth](https://support.atlassian.com/bitbucket-cloud/docs/use-oauth-on-bitbucket-cloud/)).
+
+1. Create an App password if you don't have one. Follow [instructions](https://support.atlassian.com/bitbucket-cloud/docs/app-passwords/) to create a new Bitbucket App password.
+   Grant it the `Webhooks - Read and Write` permissions as well as any permissions that apply to the events that the webhook subscribes to (e.g. if you're using the [example event-source yaml file](https://github.com/argoproj/argo-events/blob/master/examples/event-sources/bitbucket.yaml) which subscribes to the `repo:push` event, then you would also need to grant the `Repositories - Read` permission).
+
+1. Base64 encode your App password and your Bitbucket username.
+
+        echo -n <app-password> | base64
+        echo -n <username> | base64
+
+1. Create a secret called `bitbucket-access` that contains your encoded Bitbucket credentials.
+
+        apiVersion: v1
+        kind: Secret
+        metadata:
+          name: bitbucket-access
+        type: Opaque
+        data:
+          username: <base64-username>
+          password: <base64-app-password>
+
+1. Deploy the secret into K8s cluster.
+
+        kubectl -n argo-events apply -f bitbucket-access.yaml
+
+1. The event-source for Bitbucket creates a pod and exposes it via service.
+   The name for the service is in `<event-source-name>-eventsource-svc` format.
+   You will need to create an Ingress or OpenShift Route for the event-source service so that it can be reached from Bitbucket.
+   You can find more information on Ingress or Route online.
+
+1. Create the event source by running the following command. You can use the [example event-source yaml file](https://github.com/argoproj/argo-events/blob/master/examples/event-sources/bitbucket.yaml) but make sure to replace the `url` field and to modify `owner`, `repositorySlug` and `projectKey` fields with your own repo.
+
+        kubectl apply -n argo-events -f <event-source-yaml>
+
+1. Go to `Webhooks` under your project settings on Bitbucket and verify the webhook is registered. You can also do the same by looking at the event-source pod logs.
+
+1. Create the sensor by running the following command.
+
+        kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/master/examples/sensors/bitbucket.yaml
+
+1. Make a change to one of your project files and commit. It will trigger an argo workflow.
+
+1. Run `argo list` to find the workflow.
+
+## Troubleshoot
+
+Please read the [FAQ](https://argoproj.github.io/argo-events/FAQ/).
diff --git a/docs/eventsources/setup/bitbucketserver.md b/docs/eventsources/setup/bitbucketserver.md
new file mode 100644
index 0000000000..e624cd7b46
--- /dev/null
+++ b/docs/eventsources/setup/bitbucketserver.md
@@ -0,0 +1,75 @@
+# Bitbucket Server
+
+Bitbucket Server event-source programmatically configures webhooks for projects on Bitbucket Server and helps sensor trigger the workloads on events.
+ +## Event Structure + +The structure of an event dispatched by the event-source over the eventbus looks like following, + + { + "context": { + "type": "type_of_event_source", + "specversion": "cloud_events_version", + "source": "name_of_the_event_source", + "id": "unique_event_id", + "time": "event_time", + "datacontenttype": "type_of_data", + "subject": "name_of_the_configuration_within_event_source" + }, + "data": { + "body": "Body is the Bitbucket Server event payload", + "headers": "Headers from the Bitbucket Server event", + } + } + +## Specification + +Bitbucket Server event-source specification is available [here](https://github.com/argoproj/argo-events/blob/master/api/event-source.md#bitbucketservereventsource).
+Example event-source yaml file is [here](https://github.com/argoproj/argo-events/blob/master/examples/event-sources/bitbucketserver.yaml).
+
+## Setup
+
+1. Create an API token if you don't have one. Follow [instructions](https://confluence.atlassian.com/bitbucketserver072/personal-access-tokens-1005335924.html) to create a new Bitbucket Server API Token.
+   Grant it the `Projects: Admin` permissions.
+
+1. Base64 encode your API token key.
+
+        echo -n <api-token> | base64
+
+1. Create a secret called `bitbucketserver-access` that contains your encoded Bitbucket Server API token. You can also include a secret key that is encoded with `base64` for your webhook, if any.
+
+        apiVersion: v1
+        kind: Secret
+        metadata:
+          name: bitbucketserver-access
+        type: Opaque
+        data:
+          token: <base64-api-token>
+          secret: <base64-webhook-secret-key>
+
+1. Deploy the secret into K8s cluster.
+
+        kubectl -n argo-events apply -f bitbucketserver-access.yaml
+
+1. The event-source for Bitbucket Server creates a pod and exposes it via service.
+   The name for the service is in `<event-source-name>-eventsource-svc` format.
+   You will need to create an Ingress or OpenShift Route for the event-source service so that it can be reached from Bitbucket Server.
+   You can find more information on Ingress or Route online.
+
+1. Create the event source by running the following command. You can use the example event-source yaml file from [here](https://github.com/argoproj/argo-events/blob/master/examples/event-sources/bitbucketserver.yaml) but make sure to replace the `url` field and to modify the `repositories` list with your own repos.
+
+        kubectl apply -n argo-events -f <event-source-yaml>
+
+1. Go to `Webhooks` under your project settings on Bitbucket Server and verify the webhook is registered. You can also do the same by looking at the event-source pod logs.
+
+1. Create the sensor by running the following command.
+
+        kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/master/examples/sensors/bitbucketserver.yaml
+
+1. Make a change to one of your project files and commit. It will trigger an argo workflow.
+
+1. Run `argo list` to find the workflow.
+
+## Troubleshoot
+
+Please read the [FAQ](https://argoproj.github.io/argo-events/FAQ/).
diff --git a/docs/eventsources/setup/calendar.md b/docs/eventsources/setup/calendar.md
index ea49ceb695..495e4b9352 100644
--- a/docs/eventsources/setup/calendar.md
+++ b/docs/eventsources/setup/calendar.md
@@ -3,6 +3,7 @@
 Calendar event-source generates events on either a cron schedule or an interval and helps sensor trigger workloads.
 
 ## Event Structure
+
 The structure of an event dispatched by the event-source over the eventbus looks like following,
 
     {
@@ -32,10 +33,11 @@ Calendar event-source specification is available [here](https://github.com/argop
 
        kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/calendar.yaml
 
 1. The event-source will generate events at every 10 seconds. Let's create the sensor.
-    
+
        kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/calendar.yaml
 
 1. Once the sensor pod is in running state, wait for next interval to occur for sensor to trigger workflow.
 
 ## Troubleshoot
+
 Please read the [FAQ](https://argoproj.github.io/argo-events/FAQ/).
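+
+For reference, the 10-second interval used above corresponds to a calendar
+event configured roughly like the following sketch (field names per the
+specification linked above; see the example yaml for the authoritative
+version):
+
+        apiVersion: argoproj.io/v1alpha1
+        kind: EventSource
+        metadata:
+          name: calendar
+        spec:
+          calendar:
+            example-with-interval:
+              # generate an event every 10 seconds; a cron `schedule`
+              # field can be used instead of `interval`
+              interval: 10s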
diff --git a/docs/eventsources/setup/emitter.md b/docs/eventsources/setup/emitter.md
index 482fcf17a1..8a5c983368 100644
--- a/docs/eventsources/setup/emitter.md
+++ b/docs/eventsources/setup/emitter.md
@@ -80,7 +80,7 @@ Emitter event-source specification is available [here](https://github.com/argopr
           - name: broker-volume
             hostPath:
               path: /emitter #directory on host
-      
+
 1. Create the event-source by running the following command.
 
        kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/emitter.yaml
@@ -91,9 +91,10 @@ Emitter event-source specification is available [here](https://github.com/argopr
 
        kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/emitter.yaml
 
-1. Send a message on emitter channel using one of the clients https://emitter.io/develop/golang/.
+1. Send a message on emitter channel using one of the clients <https://emitter.io/develop/golang/>.
 
-1. Once a message is published, an argo workflow will be triggered. Run `argo list` to find the workflow.
+1. Once a message is published, an argo workflow will be triggered. Run `argo list` to find the workflow.
 
 ## Troubleshoot
+
 Please read the [FAQ](https://argoproj.github.io/argo-events/FAQ/).
diff --git a/docs/eventsources/setup/file.md b/docs/eventsources/setup/file.md
index 9c45279103..9c37884c47 100644
--- a/docs/eventsources/setup/file.md
+++ b/docs/eventsources/setup/file.md
@@ -3,6 +3,7 @@
 File event-source listens to file system events and helps sensor trigger workloads.
 
 ## Event Structure
+
 The structure of an event dispatched by the event-source over the eventbus looks like following,
 
     {
@@ -21,7 +22,6 @@ The structure of an event dispatched by the event-source over the eventbus looks
         }
     }
 
-
 ## Specification
 
 File event-source specification is available [here](https://github.com/argoproj/argo-events/blob/master/api/event-source.md#fileeventsource).
@@ -43,15 +43,16 @@ File event-source specification is available [here](https://github.com/argoproj/
 
        kubectl -n argo-events exec -it <event-source-pod-name> -c file-events -- /bin/bash
 
 1. Let's create a file called `x.txt` under `test-data` directory in the event-source pod.
-    
+
        cd test-data
        cat <<EOF > x.txt
       hello
       EOF
 
-1. Once you create file `x.txt`, the sensor will trigger argo workflow. Run `argo list` to find the workflow. 
+1. Once you create file `x.txt`, the sensor will trigger argo workflow. Run `argo list` to find the workflow.
 
 1. For real-world use cases, you should use PersistentVolumeClaim.
-    
+
 ## Troubleshoot
-Please read the [FAQ](https://argoproj.github.io/argo-events/FAQ/).
+
+Please read the [FAQ](https://argoproj.github.io/argo-events/FAQ/).
diff --git a/docs/eventsources/setup/gcp-pub-sub.md b/docs/eventsources/setup/gcp-pub-sub.md
index 5224bfca25..ace5a49dd2 100644
--- a/docs/eventsources/setup/gcp-pub-sub.md
+++ b/docs/eventsources/setup/gcp-pub-sub.md
@@ -40,22 +40,18 @@ GCP Pub/Sub event-source specification is available [here](https://github.com/ar
 
 1. Create a K8s secret called `gcp-credentials` to store the credentials file.
 
-   ```yaml
-   apiVersion: v1
-   data:
-     key.json: <base64-encoded-credentials>
-   kind: Secret
-   metadata:
-     name: gcp-credentials
-     namespace: argo-events
-   type: Opaque
-   ```
+       apiVersion: v1
+       data:
+         key.json: <base64-encoded-credentials>
+       kind: Secret
+       metadata:
+         name: gcp-credentials
+         namespace: argo-events
+       type: Opaque
 
 1. Create the event source by running the following command.
-   ```sh
-   kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/gcp-pubsub.yaml
-   ```
+       kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/gcp-pubsub.yaml
 
    If you use Workload Identity, omit `credentialSecret` field. Instead don't forget to configure appropriate service account (see [example](https://github.com/argoproj/argo-events/blob/stable/examples/event-sources/gcp-pubsub.yaml)).
 
@@ -63,17 +59,15 @@ GCP Pub/Sub event-source specification is available [here](https://github.com/ar
 
 1. Create the sensor by running the following command.
 
-   ```sh
-   kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/gcp-pubsub.yaml
-   ```
+       kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/gcp-pubsub.yaml
 
 1. Publish a message from GCP Pub/Sub console.
 
 1. Once a message is published, an argo workflow will be triggered. Run `argo list` to find the workflow.
 
-## Subscription, topic and service account preparetion
+## Subscription, topic and service account preparation
 
-You can use exisiting subscriptions/topics, or let Argo Events create them.
+You can use existing subscriptions/topics, or let Argo Events create them.
 
 Here's the table of which fields are required in the configuration file and what permissions are needed for service account.
diff --git a/docs/eventsources/setup/github.md b/docs/eventsources/setup/github.md
index cf9fa4f2b6..234aeec7ff 100644
--- a/docs/eventsources/setup/github.md
+++ b/docs/eventsources/setup/github.md
@@ -17,8 +17,8 @@ The structure of an event dispatched by the event-source over the eventbus looks
         "subject": "name_of_the_configuration_within_event_source"
     },
     "data": {
-      "body": "Body is the Github event data",
-      "headers": "Headers from the Github event",
+      "body": "Body is the GitHub event data",
+      "headers": "Headers from the GitHub event",
     }
 }
 
@@ -30,13 +30,13 @@ Example event-source yaml file is [here](https://github.com/argoproj/argo-events
 ## Setup
 
 1. Create an API token if you don't have one. Follow [instructions](https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line) to create a new GitHub API Token.
-   Grant it the `repo_hook` permissions. 
+   Grant it the `repo_hook` permissions.
 
-1. Base64 encode your api token key.
+1. Base64 encode your API token key.
 
        echo -n <api-token> | base64
 
-1. Create a secret called `github-access`.
+1. Create a secret called `github-access` that contains your encoded GitHub API token. You can also include a secret key that is encoded with `base64` for your webhook, if any.
 
        apiVersion: v1
        kind: Secret
@@ -45,6 +45,7 @@ Example event-source yaml file is [here](https://github.com/argoproj/argo-events
        type: Opaque
        data:
          token: <base64-api-token>
+         secret: <base64-webhook-secret-key>
 
 1. Deploy the secret into K8s cluster.
 
@@ -52,11 +53,11 @@ Example event-source yaml file is [here](https://github.com/argoproj/argo-events
 
 1. The event-source for GitHub creates a pod and exposes it via service.
    The name for the service is in `<event-source-name>-eventsource-svc` format.
-   You will need to create an Ingress or Openshift Route for the event-source service so that it can be reached from GitHub.
+   You will need to create an Ingress or OpenShift Route for the event-source service so that it can be reached from GitHub.
    You can find more information on Ingress or Route online.
 
 1. Create the event source by running the following command. Make sure to replace the `url` field.
-    
+
        kubectl apply -n argo-events -f <event-source-yaml>
 
 1. Go to `Webhooks` under your project settings on GitHub and verify the webhook is registered. You can also do the same by looking at the event-source pod logs.
 
@@ -67,7 +68,8 @@ Example event-source yaml file is [here](https://github.com/argoproj/argo-events
 
 1. Make a change to one of your project files and commit. It will trigger an argo workflow.
 
-1. Run `argo list` to find the workflow.
+1. Run `argo list` to find the workflow.
 
 ## Troubleshoot
+
 Please read the [FAQ](https://argoproj.github.io/argo-events/FAQ/).
diff --git a/docs/eventsources/setup/gitlab.md b/docs/eventsources/setup/gitlab.md
index 6def7b7f8f..1ac56454d0 100644
--- a/docs/eventsources/setup/gitlab.md
+++ b/docs/eventsources/setup/gitlab.md
@@ -17,8 +17,8 @@ The structure of an event dispatched by the event-source over the eventbus looks
         "subject": "name_of_the_configuration_within_event_source"
     },
     "data": {
-      "body": "Body is the gitlab event data",
-      "headers": "Headers from the Gitlab event",
+      "body": "Body is the GitLab event data",
+      "headers": "Headers from the GitLab event",
    }
 }
 
@@ -52,23 +52,23 @@ Example event-source yaml file is [here](https://github.com/argoproj/argo-events
 
 1. The event-source for GitLab creates a pod and exposes it via service.
    The name for the service is in `<event-source-name>-eventsource-svc` format.
-   You will need to create an Ingress or Openshift Route for the event-source service so that it can be reached from GitLab.
+   You will need to create an Ingress or OpenShift Route for the event-source service so that it can be reached from GitLab.
    You can find more information on Ingress or Route online.
 
 1. Create the event source by running the following command. Make sure to update `url` field.
-    
+
        kubectl apply -n argo-events -f <event-source-yaml>
 
 1. Go to `Webhooks` under your project settings on GitLab and verify the webhook is registered.
-    
+
 1. Create the sensor by running the following command.
 
        kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/gitlab.yaml
 
 1. Make a change to one of your project files and commit. It will trigger an argo workflow.
 
-1. Run `argo list` to find the workflow.
+1. Run `argo list` to find the workflow.
 
 ## Troubleshoot
 
-Please read the [FAQ](https://argoproj.github.io/argo-events/FAQ/).
+Please read the [FAQ](https://argoproj.github.io/argo-events/FAQ/).
diff --git a/docs/eventsources/setup/kafka.md b/docs/eventsources/setup/kafka.md
index 7c9af8ad8f..9ec4e69b3e 100644
--- a/docs/eventsources/setup/kafka.md
+++ b/docs/eventsources/setup/kafka.md
@@ -3,6 +3,7 @@
 Kafka event-source listens to messages on topics and helps the sensor trigger workloads.
 
 ## Event Structure
+
 The structure of an event dispatched by the event-source over the eventbus looks like following,
 
     {
@@ -29,7 +30,7 @@ Kafka event-source specification is available [here](https://github.com/argoproj
 
 ## Setup
 
-1. Make sure to set up the Kafka cluster in Kubernetes if you don't already have one. You can refer to https://github.com/Yolean/kubernetes-kafka for installation instructions.
+1. Make sure to set up the Kafka cluster in Kubernetes if you don't already have one. You can refer to <https://github.com/Yolean/kubernetes-kafka> for installation instructions.
 
 1. Create the event source by running the following command. Make sure to update the appropriate fields.
@@ -39,9 +40,10 @@ Kafka event-source specification is available [here](https://github.com/argoproj
 
        kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/kafka.yaml
 
-1. Send message by using Kafka client. More info on how to send message at https://kafka.apache.org/quickstart.
+1. Send a message by using a Kafka client. More info on how to send a message is at <https://kafka.apache.org/quickstart>.
 
-1. Once a message is published, an argo workflow will be triggered. Run `argo list` to find the workflow.
+1. Once a message is published, an argo workflow will be triggered. Run `argo list` to find the workflow.
 
 ## Troubleshoot
+
 Please read the [FAQ](https://argoproj.github.io/argo-events/FAQ/).
diff --git a/docs/eventsources/setup/minio.md b/docs/eventsources/setup/minio.md
index 6f4c082efc..70149a46de 100644
--- a/docs/eventsources/setup/minio.md
+++ b/docs/eventsources/setup/minio.md
@@ -6,6 +6,7 @@ Minio event-source listens to minio bucket notifications and helps sensor trigge
    please set up the AWS SNS event-source.
 
 ## Event Structure
+
 The structure of an event dispatched by the event-source over the eventbus looks like following,
 
     {
@@ -52,7 +53,7 @@ The structure of an event dispatched by the event-source over the eventbus looks
          name: artifacts-minio
          namespace: argo-events
 
-1. The event source we are going to use configures notifications for a bucket called `input`. 
+1. The event source we are going to use configures notifications for a bucket called `input`.
 
        mc mb minio/input
 
@@ -61,7 +62,7 @@ The structure of an event dispatched by the event-source over the eventbus looks
        kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/minio.yaml
 
 1. Let's create the sensor.
-    
+
       kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/minio.yaml
 
 1. Create a file named `hello-world.txt` and upload it onto the `input` bucket. This will trigger the argo workflow.
 
 1. Run `argo list` to find the workflow.
 
 ## Troubleshoot
+
 Please read the [FAQ](https://argoproj.github.io/argo-events/FAQ/).
diff --git a/docs/eventsources/setup/mqtt.md b/docs/eventsources/setup/mqtt.md
index 785e280e6a..422e40cbc7 100644
--- a/docs/eventsources/setup/mqtt.md
+++ b/docs/eventsources/setup/mqtt.md
@@ -6,7 +6,6 @@ The event-source listens to messages over MQTT and helps sensor trigger the work
 
 The structure of an event dispatched by the event-source over the eventbus looks like following,
 
-
     {
         "context": {
             "type": "type_of_event_source",
@@ -30,7 +29,7 @@ MQTT event-source specification is available [here](https://github.com/argoproj/
 
 ## Setup
 
-1. Make sure to set up the MQTT Broker and Bridge in Kubernetes if you don't already have one. 
+1. Make sure to set up the MQTT Broker and Bridge in Kubernetes if you don't already have one.
 
 1. Create the event source by running the following command. Make sure to update the appropriate fields.
 
@@ -38,13 +37,12 @@ MQTT event-source specification is available [here](https://github.com/argoproj/
 
 1. Create the sensor by running the following command.
 
-       kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/mqtt.yaml
+       kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/mqtt-sensor.yaml
 
 1. Send message by using MQTT client.
 
-1. Once a message is published, an argo workflow will be triggered. Run `argo list` to find the workflow.
+1. Once a message is published, an argo workflow will be triggered. Run `argo list` to find the workflow.
 
 ## Troubleshoot
 
-Please read the [FAQ](https://argoproj.github.io/argo-events/FAQ/).
-
+Please read the [FAQ](https://argoproj.github.io/argo-events/FAQ/).
diff --git a/docs/eventsources/setup/nats.md b/docs/eventsources/setup/nats.md
index 749a5ea49a..b1e1c29786 100644
--- a/docs/eventsources/setup/nats.md
+++ b/docs/eventsources/setup/nats.md
@@ -3,6 +3,7 @@
 NATS event-source listens to NATS subject notifications and helps sensor trigger the workloads.
 
 ## Event Structure
+
 The structure of an event dispatched by the event-source over the eventbus looks like following,
 
     {
@@ -17,6 +18,7 @@ The structure of an event dispatched by the event-source over the eventbus looks
         },
         "data": {
           "subject": "name_of_the_nats_subject",
+          "headers": "headers_of_the_nats_message",
           "body": "message_payload"
         }
     }
@@ -27,10 +29,10 @@ NATS event-source specification is available [here](https://github.com/argoproj/
 
 ## Setup
 
-1. Make sure to have NATS cluster deployed in the Kubernetes. If you don't have one already installed, please refer https://github.com/nats-io/nats-operator for details.
+1. Make sure to have a NATS cluster deployed in Kubernetes. If you don't have one already installed, please refer to <https://github.com/nats-io/nats-operator> for details.
 
    NATS cluster setup for test purposes,
-    
+
        apiVersion: v1
        kind: Service
       metadata:
@@ -86,7 +88,6 @@ NATS event-source specification is available [here](https://github.com/argoproj/
 
        kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/nats.yaml
 
-
 1. Create the sensor by running the following command.
 
        kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/nats.yaml
 
@@ -94,12 +95,13 @@ NATS event-source specification is available [here](https://github.com/argoproj/
 1. If you are running NATS on local K8s cluster, make sure to `port-forward` to pod,
 
        kubectl -n argo-events port-forward <nats-pod-name> 4222:4222
-    
-1. Publish a message for the subject specified in the event source. Refer the nats example to publish a message to the subject https://github.com/nats-io/go-nats-examples/tree/master/patterns/publish-subscribe.
+
+1. Publish a message for the subject specified in the event source. Refer to the NATS example at <https://github.com/nats-io/go-nats-examples/tree/master/patterns/publish-subscribe> to publish a message to the subject.
 
        go run main.go -s localhost foo '{"message": "hello"}'
 
-1. Once a message is published, an argo workflow will be triggered. Run `argo list` to find the workflow.
+1. Once a message is published, an argo workflow will be triggered. Run `argo list` to find the workflow.
 
 ## Troubleshoot
+
 Please read the [FAQ](https://argoproj.github.io/argo-events/FAQ/).
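+
+If you would rather not clone the example repository, a minimal Go publisher
+along these lines should behave the same (a sketch using the `nats.go` client
+against the port-forwarded server and the `foo` subject from the steps above):
+
+        package main
+
+        import (
+            "log"
+
+            "github.com/nats-io/nats.go"
+        )
+
+        func main() {
+            // connect to the port-forwarded NATS server
+            nc, err := nats.Connect("nats://localhost:4222")
+            if err != nil {
+                log.Fatal(err)
+            }
+            defer nc.Close()
+
+            // publish to the subject configured in the event source
+            if err := nc.Publish("foo", []byte(`{"message": "hello"}`)); err != nil {
+                log.Fatal(err)
+            }
+            nc.Flush()
+        }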
diff --git a/docs/eventsources/setup/nsq.md b/docs/eventsources/setup/nsq.md
index 022e27d71d..a82f46b01d 100644
--- a/docs/eventsources/setup/nsq.md
+++ b/docs/eventsources/setup/nsq.md
@@ -17,9 +17,9 @@ The structure of an event dispatched by the event-source over the eventbus looks
         "subject": "name_of_the_configuration_within_event_source"
     },
     "data": {
-      "body": "Body is the message data",
-      "timestamp": "timestamp of the message",
-      "nsqdAddress": "NSQDAddress is the address of the nsq host"
+      "body": "Body is the message data",
+      "timestamp": "timestamp of the message",
+      "nsqdAddress": "NSQDAddress is the address of the nsq host"
     }
 }
 
@@ -239,7 +239,7 @@ NSQ event-source is available [here](https://github.com/argoproj/argo-events/blo
 
        curl -X POST 'http://localhost:4151/topic/create?topic=hello'
        curl -X POST 'http://localhost:4151/channel/create?topic=hello&channel=my-channel'
-    
+
 1. Create the event source by running the following command.
 
        kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/nsq.yaml
@@ -252,7 +252,8 @@ NSQ event-source is available [here](https://github.com/argoproj/argo-events/blo
 
        curl -d '{"message": "hello"}' 'http://localhost:4151/pub?topic=hello&channel=my-channel'
 
-9. Once a message is published, an argo workflow will be triggered. Run `argo list` to find the workflow.
+9. Once a message is published, an argo workflow will be triggered. Run `argo list` to find the workflow.
 
 ## Troubleshoot
+
 Please read the [FAQ](https://argoproj.github.io/argo-events/FAQ/).
diff --git a/docs/eventsources/setup/pulsar.md b/docs/eventsources/setup/pulsar.md
index 4a8356a5db..0a9e14b812 100644
--- a/docs/eventsources/setup/pulsar.md
+++ b/docs/eventsources/setup/pulsar.md
@@ -17,9 +17,9 @@ The structure of an event dispatched by the event-source over the eventbus looks
         "subject": "name_of_the_configuration_within_event_source"
     },
     "data": {
-      "body": "body is the message data",
-      "publishTime": "timestamp of the message",
-      "key": "message key"
+      "body": "body is the message data",
+      "publishTime": "timestamp of the message",
+      "key": "message key"
     }
 }
 
@@ -88,12 +88,13 @@ Pulsar event-source is available [here](https://github.com/argoproj/argo-events/
 
        kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/pulsar.yaml
 
 1. Deploy the sensor.
-    
+
        kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/pulsar.yaml
 
 1. Publish a message on topic `test`.
 
-1. Run `argo list` to find the workflow.
+1. Run `argo list` to find the workflow.
 
 ## Troubleshoot
+
 Please read the [FAQ](https://argoproj.github.io/argo-events/FAQ/).
diff --git a/docs/eventsources/setup/redis-streams.md b/docs/eventsources/setup/redis-streams.md
new file mode 100644
index 0000000000..e048a69bc0
--- /dev/null
+++ b/docs/eventsources/setup/redis-streams.md
@@ -0,0 +1,81 @@
+# Redis Streams
+
+Redis stream event-source listens to messages on Redis streams and helps sensor trigger workloads.
+
+Messages from the stream are read using a Redis consumer group. The main reason for using a consumer group is to resume from the last read upon pod restarts. A common consumer group (defaults to "argo-events-cg") is created (if it does not already exist) on all specified streams.
When using a consumer group, each read through a consumer group is a write operation, because Redis needs to update the last retrieved message ID and the pending entries list (PEL) of that specific consumer in the consumer group. So it can only work with the master Redis instance, not replicas.
+
+Redis stream event source expects all the streams to be present on the Redis server. This event source only starts pulling messages from the streams when all of the specified streams exist on the Redis server. On the initial setup, the consumer group is created on all the specified streams to start reading from the latest message (not necessarily the beginning of the stream). On subsequent setups (the consumer group already exists on the streams) or during pod restarts, messages are pulled from the last unacknowledged message in the stream.
+
+The consumer group is never deleted automatically. If you want a completely fresh setup again, you must delete the consumer group from the streams (see the sketch at the end of this page).
+
+## Event Structure
+
+The structure of an event dispatched by the event-source over the eventbus looks like following,
+
+    {
+        "context": {
+          "id": "unique_event_id",
+          "source": "name_of_the_event_source",
+          "specversion": "cloud_events_version",
+          "type": "type_of_event_source",
+          "datacontenttype": "type_of_data",
+          "subject": "name_of_the_configuration_within_event_source",
+          "time": "event_time"
+        },
+        "data": {
+          "stream": "Name of the Redis stream",
+          "message_id": "Message Id",
+          "values": "message body"
+        }
+    }
+
+Example:
+
+    {
+        "context": {
+          "id": "64313638396337352d623565612d343639302d383262362d306630333562333437363637",
+          "source": "redis-stream",
+          "specversion": "1.0",
+          "type": "redisStream",
+          "datacontenttype": "application/json",
+          "subject": "example",
+          "time": "2022-03-17T04:47:42Z"
+        },
+        "data": {
+          "stream":"FOO",
+          "message_id":"1647495121754-0",
+          "values": {"key-1":"val-1", "key-2":"val-2"}
+        }
+    }
+
+## Specification
+
+Redis stream event-source specification is available [here](https://github.com/argoproj/argo-events/blob/master/api/event-source.md#argoproj.io/v1alpha1.RedisStreamEventSource).
+
+## Setup
+
+1. Follow the [documentation](https://kubernetes.io/docs/tutorials/configuration/configure-redis-using-configmap/#real-world-example-configuring-redis-using-a-configmap) to set up a Redis database.
+
+1. Create the event source by running the following command.
+
+        kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/redis-streams.yaml
+
+1. Create the sensor by running the following command.
+
+        kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/redis-streams.yaml
+
+1. Log into the redis pod using `kubectl`.
+
+        kubectl -n argo-events exec -it <redis-pod-name> -c <redis-container-name> -- /bin/bash
+
+1. Run `redis-cli` and publish a message on the stream `FOO`.
+
+        XADD FOO * message hello
+
+1. Once a message is published, an argo workflow will be triggered. Run `argo list` to find the workflow.
+
+## Troubleshoot
+
+Redis stream event source expects all the streams to be present on the Redis server. It only starts pulling messages from the streams when all of the specified streams exist on the Redis server.
+
+Please read the [FAQ](https://argoproj.github.io/argo-events/FAQ/).
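+
+For example, to get a completely fresh setup for the `FOO` stream used above,
+you can delete the consumer group from it in `redis-cli` (assuming you kept
+the default group name `argo-events-cg`):
+
+        XGROUP DESTROY FOO argo-events-cg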
diff --git a/docs/eventsources/setup/redis.md b/docs/eventsources/setup/redis.md
index 6b4254fa10..3f0e608064 100644
--- a/docs/eventsources/setup/redis.md
+++ b/docs/eventsources/setup/redis.md
@@ -17,9 +17,9 @@ The structure of an event dispatched by the event-source over the eventbus looks
         "subject": "name_of_the_configuration_within_event_source"
     },
     "data": {
-      "channel": "Subscription channel",
-      "pattern": "Message pattern",
-      "body": "message body" // string
+      "channel": "Subscription channel",
+      "pattern": "Message pattern",
+      "body": "message body" // string
     }
 }
 
@@ -47,7 +47,8 @@ Redis event-source specification is available [here](https://github.com/argoproj
 
        PUBLISH FOO hello
 
-1. Once a message is published, an argo workflow will be triggered. Run `argo list` to find the workflow.
+1. Once a message is published, an argo workflow will be triggered. Run `argo list` to find the workflow.
 
 ## Troubleshoot
+
 Please read the [FAQ](https://argoproj.github.io/argo-events/FAQ/).
diff --git a/docs/eventsources/setup/resource.md b/docs/eventsources/setup/resource.md
index 03a6a60346..dd6108df31 100644
--- a/docs/eventsources/setup/resource.md
+++ b/docs/eventsources/setup/resource.md
@@ -3,6 +3,7 @@
 Resource event-source watches change notifications for K8s object and helps sensor trigger the workloads.
 
 ## Event Structure
+
 The structure of an event dispatched by the event-source over the eventbus looks like following,
 
     {
@@ -29,7 +30,8 @@ The structure of an event dispatched by the event-source over the eventbus looks
 Resource event-source specification is available [here](https://github.com/argoproj/argo-events/blob/master/api/event-source.md#resourceeventsource).
 
 ## Setup
-1. Create the event source by running the following command. 
+
+1. Create the event source by running the following command.
 
        kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/resource.yaml
 
@@ -40,7 +42,7 @@ Resource event-source specification is available [here](https://github.com/argop
 
 1. The event source we created in step 1 contains configuration which makes the event-source listen to Argo workflows marked with label `app: my-workflow`.
 
 1. Let's create a workflow called `my-workflow` with label `app: my-workflow`.
-    
+
        apiVersion: argoproj.io/v1alpha1
       kind: Workflow
       metadata:
@@ -78,10 +80,9 @@ In the example above, we had set up the list option as follows,
             operation: "=="
             value: my-workflow
 
-
 The `key-operation-value` items under the `filter -> labels` are used by the
 event-source to filter the objects that are eligible for the watch. So, in the present case, the event-source will set up a watch for those
-objects who have label "app: my-workflow". You can add more `key-operation-value` items to the list as per your use-case.
+objects that have the label "app: my-workflow". You can add more `key-operation-value` items to the list as per your use-case.
 
 Similarly, you can pass `field` selectors to the watch list options, e.g.,
 
@@ -98,10 +99,10 @@ Similarly, you can pass `field` selectors to the watch list options, e.g.,
           # optional.
           operation: ==
           value: my-workflow
-
 **Note:** The `label` and `fields` under `filter` are used at the time of setting up the watch by the event-source. If you want to filter the objects based on the `annotations` or some other fields, use the `Data Filters` available in the sensor (see the sketch below).
 
 ## Troubleshoot
+
 Please read the [FAQ](https://argoproj.github.io/argo-events/FAQ/).
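+
+As a sketch of that data-filter approach (the annotation key `environment` and
+its value below are hypothetical; adjust the paths to your objects), a sensor
+dependency could look like:
+
+        dependencies:
+          - name: workflow-dep
+            eventSourceName: resource
+            eventName: example
+            filters:
+              data:
+                # "body" holds the watched object in resource events
+                - path: body.metadata.annotations.environment
+                  type: string
+                  value:
+                    - production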
diff --git a/docs/eventsources/setup/sftp.md b/docs/eventsources/setup/sftp.md
new file mode 100644
index 0000000000..bf211e6840
--- /dev/null
+++ b/docs/eventsources/setup/sftp.md
@@ -0,0 +1,53 @@
+# SFTP
+
+SFTP event-source polls an SFTP server to identify changes and helps sensor trigger workloads.
+
+## Event Structure
+
+The structure of an event dispatched by the event-source over the eventbus looks like following,
+
+    {
+        "context": {
+          "type": "type_of_event_source",
+          "specversion": "cloud_events_version",
+          "source": "name_of_the_event_source",
+          "id": "unique_event_id",
+          "time": "event_time",
+          "datacontenttype": "type_of_data",
+          "subject": "name_of_the_configuration_within_event_source"
+        },
+        "data": {
+          "name": "Relative path to the file or directory",
+          "op": "File operation that triggered the event" // Create, Remove
+        }
+    }
+
+## Specification
+
+SFTP event-source specification is available [here](https://github.com/argoproj/argo-events/blob/master/api/event-source.md#sftpeventsource).
+
+## Setup
+
+1. Create the event source by running the following command.
+
+        kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/sftp.yaml
+
+1. The event source has configuration to poll the SFTP server every 10 seconds for the `test-data` directory and file(s) called `x.txt`.
+
+1. Create the sensor by running the following command.
+
+        kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/sftp.yaml
+
+1. Log into the event-source pod by running the following command.
+
+        kubectl -n argo-events exec -it <event-source-pod-name> -c sftp-events -- /bin/bash
+
+1. Create a file called `x.txt` under the `test-data` directory on the SFTP server.
+
+1. Once you create file `x.txt`, the sensor will trigger an argo workflow. Run `argo list` to find the workflow.
+
+1. For real-world use cases, you should use PersistentVolumeClaim.
+
+## Troubleshoot
+
+Please read the [FAQ](https://argoproj.github.io/argo-events/FAQ/).
diff --git a/docs/eventsources/setup/webhook.md b/docs/eventsources/setup/webhook.md
index 04e2e995ff..1751a512a5 100644
--- a/docs/eventsources/setup/webhook.md
+++ b/docs/eventsources/setup/webhook.md
@@ -35,10 +35,11 @@ Webhook event-source specification is available [here](https://github.com/argopr
 
 1. The event-source pod is listening for HTTP requests on port `12000` and endpoint `/example`.
    It's time to create the sensor.
-    
+
        kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/webhook.yaml
 
 1. Once the sensor pod is in running state, test the setup by sending a POST request to event-source service.
 
 ## Troubleshoot
+
 Please read the [FAQ](https://argoproj.github.io/argo-events/FAQ/).
diff --git a/docs/index.md b/docs/index.md
index d6d9032a07..9702ca602d 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -4,7 +4,7 @@
 
 **Argo Events** is an event-driven workflow automation framework for Kubernetes
 which helps you trigger K8s objects, Argo Workflows, Serverless workloads, etc.
-on events from variety of sources like webhook, s3, schedules, messaging queues,
+on events from a variety of sources like webhooks, S3, schedules, messaging queues,
 gcp pubsub, sns, sqs, etc.
@@ -41,7 +41,7 @@ to set up Argo Events. 1. Argo Workflows 1. Standard K8s Objects -1. HTTP Requests / Serverless Workloads (OpenFaas, Kubeless, KNative etc.) +1. HTTP Requests / Serverless Workloads (OpenFaaS, Kubeless, KNative etc.) 1. AWS Lambda 1. NATS Messages 1. Kafka Messages diff --git a/docs/installation.md b/docs/installation.md index 706b193942..11fc06d044 100644 --- a/docs/installation.md +++ b/docs/installation.md @@ -13,22 +13,38 @@ kubectl create namespace argo-events -2. Deploy Argo Events, SA, ClusterRoles, Sensor Controller, EventBus Controller and EventSource Controller. +2. Deploy Argo Events SA, ClusterRoles, and Controller for Sensor, EventBus, and EventSource. kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/manifests/install.yaml # Install with a validating admission controller kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/manifests/install-validating-webhook.yaml - NOTE: - + NOTE: + * On GKE, you may need to grant your account the ability to create new custom resource definitions and clusterroles - + kubectl create clusterrolebinding YOURNAME-cluster-admin-binding --clusterrole=cluster-admin --user=YOUREMAIL@gmail.com - - * On Openshift, make sure to grant `anyuid` scc to the service account. - oc adm policy add-scc-to-user anyuid system:serviceaccount:argo-events:default + * On OpenShift: + - Make sure to grant `anyuid` scc to the service accounts. + + oc adm policy add-scc-to-user anyuid system:serviceaccount:argo-events:argo-events-sa system:serviceaccount:argo-events:argo-events-webhook-sa + + - Add update permissions for the `deployments/finalizers` and `clusterroles/finalizers` of the argo-events-webhook ClusterRole(this is necessary for the validating admission controller) + + - apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles/finalizers + verbs: + - update + - apiGroups: + - apps + resources: + - deployments/finalizers + verbs: + - update 3. Deploy the eventbus. @@ -40,24 +56,39 @@ kubectl create namespace argo-events -2. Deploy Argo Events, SA, Roles, Sensor Controller, EventBus Controller and EventSource Controller. +2. Deploy Argo Events SA, ClusterRoles, and Controller for Sensor, EventBus, and EventSource. kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/manifests/namespace-install.yaml - NOTE: - + NOTE: + * On GKE, you may need to grant your account the ability to create new custom resource definitions - + kubectl create clusterrolebinding YOURNAME-cluster-admin-binding --clusterrole=cluster-admin --user=YOUREMAIL@gmail.com - - * On Openshift, make sure to grant `anyuid` scc to the service account. + + * On OpenShift: + - Make sure to grant `anyuid` scc to the service account. oc adm policy add-scc-to-user anyuid system:serviceaccount:argo-events:default -3. Deploy the eventbus. + - Add update permissions for the `deployments/finalizers` and `clusterroles/finalizers` of the argo-events-webhook ClusterRole(this is necessary for the validating admission controller) + + - apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles/finalizers + verbs: + - update + - apiGroups: + - apps + resources: + - deployments/finalizers + verbs: + - update - kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/eventbus/native.yaml +3. Deploy the eventbus. 
+ kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/eventbus/native.yaml ### Using Kustomize @@ -72,11 +103,7 @@ Use either [`cluster-install`](https://github.com/argoproj/argo-events/tree/stab ### Using Helm Chart -Note: This method does not work with Helm 3, only Helm 2. - -Make sure you have helm client installed and Tiller server is running. To install helm, follow the link. - -1. Create namespace called argo-events. +Make sure you have helm client installed. To install helm, follow the link. 1. Add `argoproj` repository. @@ -87,7 +114,11 @@ Make sure you have helm client installed and Tiller server is running. To instal 1. Install `argo-events` chart. - helm install argo-events argo/argo-events + helm install argo-events argo/argo-events -n argo-events --create-namespace + +1. Deploy the eventbus. + + kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/eventbus/native.yaml ### Migrate to v1.0.0 diff --git a/docs/managed-namespace.md b/docs/managed-namespace.md index 01d180d960..46c949726c 100644 --- a/docs/managed-namespace.md +++ b/docs/managed-namespace.md @@ -2,7 +2,9 @@ You can install `argo-events` in either cluster scoped or namespace scoped configuration, accordingly you need to set up ClusterRole or normal Role for service account `argo-events-sa`. -In namespace scope installation, you must run `eventbus-controller`, `eventsource-controller` and `sensor-controller` with `--namespaced`. If you would like to have the controllers watching a separated namespace, add `--managed-namespace` as well. +## v1.7+ + +In namespace scope installation, you must run `controller-manager` deployment with `--namespaced`. If you would like to have the controller watching a separate namespace, add `--managed-namespace` as well. For example: @@ -12,3 +14,7 @@ For example: - --managed-namespace - default ``` + +## Prior to v1.7 + +There were 3 controller deployments (`eventbus-controller`, `eventsource-controller` and `sensor-controller`) in the versions prior to v1.7, to run namespaced installation, add `--namespaced` argument to each of them. Argument `--managed-namespace` is also supported to watch a different namespace. diff --git a/docs/metrics.md b/docs/metrics.md index 48e7ab5caa..1c9bb91db1 100644 --- a/docs/metrics.md +++ b/docs/metrics.md @@ -123,7 +123,7 @@ Prometheus configuration. Following metrics are considered as [Golden Signals](https://sre.google/sre-book/monitoring-distributed-systems/#xref_monitoring_golden-signals) -of monitoring your applictions running with Argo Events. +of monitoring your applications running with Argo Events. - Latency diff --git a/docs/quick_start.md b/docs/quick_start.md index cec7480cc1..7b6739275c 100644 --- a/docs/quick_start.md +++ b/docs/quick_start.md @@ -1,8 +1,23 @@ # Getting Started -We are going to set up a sensor and event-source for webhook. The goal is to trigger an Argo workflow upon a HTTP Post request. +We are going to set up a sensor and event-source for webhook. The goal is to trigger an Argo workflow upon an HTTP Post request. Note: You will need to have [Argo Workflows](https://argoproj.github.io/argo-workflows/) installed to make this work. +The Argo Workflow controller will need to be configured to listen for Workflow objects created in `argo-events` namespace. + (See [this](https://github.com/argoproj/argo-workflows/blob/master/docs/managed-namespace.md) link.) 
+ The Workflow Controller will need to be installed either in a cluster-scope configuration (i.e. no "--namespaced" argument) so that it has visibility to all namespaces, or with "--managed-namespace" set to define "argo-events" as a namespace it has visibility to. To deploy Argo Workflows with a cluster-scope configuration you can use this installation yaml file, setting `ARGO_WORKFLOWS_VERSION` to your desired version. A list of versions can be found by viewing [these](https://github.com/argoproj/argo-workflows/tags) project tags in the Argo Workflows GitHub repository.
+
+        export ARGO_WORKFLOWS_VERSION=3.5.4
+        kubectl create namespace argo
+        kubectl apply -n argo -f https://github.com/argoproj/argo-workflows/releases/download/v$ARGO_WORKFLOWS_VERSION/install.yaml
+
+1. Install Argo Events
+
+        kubectl create namespace argo-events
+        kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/manifests/install.yaml
+        # Install with a validating admission controller
+        kubectl apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/manifests/install-validating-webhook.yaml
+
 1. Make sure to have the eventbus pods running in the namespace. Run following command to create the eventbus.
@@ -16,17 +31,24 @@ Note: You will need to have [Argo Workflows](https://argoproj.github.io/argo-wor
 
    After running the above command, the event-source controller will create a pod and service.
 
+1. Create a service account with RBAC settings to allow the sensor to trigger workflows, and allow workflows to function.
+
+        # sensor rbac
+        kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/master/examples/rbac/sensor-rbac.yaml
+        # workflow rbac
+        kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/master/examples/rbac/workflow-rbac.yaml
+
 1. Create webhook sensor.
 
        kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/webhook.yaml
 
-   Once the sensor object is created, sensor controller will create corresponding pod and a service.
+   Once the sensor object is created, the sensor controller will create a corresponding pod and service.
 
 1. Expose the event-source pod via Ingress, OpenShift Route or port forward to consume requests over HTTP.
-
+
        kubectl -n argo-events port-forward $(kubectl -n argo-events get pod -l eventsource-name=webhook -o name) 12000:12000 &
 
-1. Use either Curl or Postman to send a post request to the http://localhost:12000/example.
+1. Use either Curl or Postman to send a post request to http://localhost:12000/example.
 
        curl -d '{"message":"this is my first webhook"}' -H "Content-Type: application/json" -X POST http://localhost:12000/example
diff --git a/docs/security.md b/docs/security.md
new file mode 100644
index 0000000000..dd0451a525
--- /dev/null
+++ b/docs/security.md
@@ -0,0 +1,3 @@
+# Security
+
+Please see [SECURITY.md](https://github.com/argoproj/argo-events/blob/master/SECURITY.md)
diff --git a/docs/sensors/filters/ctx.md b/docs/sensors/filters/ctx.md
new file mode 100644
index 0000000000..f9f8a43b87
--- /dev/null
+++ b/docs/sensors/filters/ctx.md
@@ -0,0 +1,84 @@
+
+# Context Filter
+
+Context filter is applied to the event context.
A CloudEvent from Webhook event-source has payload structure as: + +```json +{ + "context": { + "type": "type_of_event_source", + "specversion": "cloud_events_version", + "source": "name_of_the_event_source", + "id": "unique_event_id", + "time": "event_time", + "datacontenttype": "type_of_data", + "subject": "name_of_the_configuration_within_event_source" + }, + "data": { + "header": {}, + "body": {}, + } +} +``` + +## Fields + +Context filter has following fields: + +```yaml +filters: + context: + type: event_type + subject: event_subject + source: event_source + datacontenttype: event_data_content_type +``` + +You can also specify id, specversion and time fields in the YAML manifest, but they are ignored in filtering. + +**Note** It could be useless to build a context filter based on `datacontenttype`, `source` and `subject` as currently they come fixed from event-source: + +- `datacontenttype` is always `application/json` +- `source` corresponds to `eventSourceName` specified in the Sensor YAML manifest +- `subject` corresponds to `eventName` specified in the Sensor YAML manifest + +## How it works + +Specify one or more of the available context fields: + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: with-ctx-filter +spec: + template: + serviceAccountName: operate-workflow-sa + dependencies: + - name: test-dep + eventSourceName: webhook + eventName: example + filters: + context: + source: custom-webhook +``` + +## Practical example + +1. Create a webhook event-source + + kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml + +1. Create a webhook sensor with context filter + + kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/filter-with-context.yaml + +1. Send an HTTP request to event-source + + curl -d '{"message":"this is my first webhook"}' -H "Content-Type: application/json" -X POST http://localhost:12000/example + +1. You will notice in sensor logs that the event is invalid as the sensor expects `custom-webhook` as the value of the `source` + +## Further examples + +You can find some examples [here](https://github.com/argoproj/argo-events/tree/master/examples/sensors). diff --git a/docs/sensors/filters/data.md b/docs/sensors/filters/data.md new file mode 100644 index 0000000000..26cc9ad794 --- /dev/null +++ b/docs/sensors/filters/data.md @@ -0,0 +1,200 @@ + +# Data Filter + +Data filters are applied to the event data. A CloudEvent from Webhook event-source has payload structure as: + +```json +{ + "context": { + "type": "type_of_event_source", + "specversion": "cloud_events_version", + "source": "name_of_the_event_source", + "id": "unique_event_id", + "time": "event_time", + "datacontenttype": "type_of_data", + "subject": "name_of_the_configuration_within_event_source" + }, + "data": { + "header": {}, + "body": {}, + } +} +``` + +Data filters are applied on `data` within the payload. + +## Fields + +A data filter has following fields: + +```yaml +filters: + dataLogicalOperator: logical_operator_applied + data: + - path: path_within_event_data + type: types_of_the_data + comparator: numeric_comparator + value: + - list_of_possible_values +``` + +> ⚠️ `PLEASE NOTE` order in which data filters are declared corresponds to the order in which the Sensor will evaluate them. 
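For instance, a minimal sketch of a single string filter (the path and values are illustrative, mirroring the practical example below) that only passes events whose `body.message` is `hello` or `hey`:

```yaml
filters:
  data:
    - path: body.message   # path into the event data
      type: string         # string values are evaluated as regex
      value:
        - "hello"
        - "hey"
```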
+ +## Logical operator + +Data filters can be evaluated together in 2 ways: + +- `and`, meaning that all data filters returning `true` are required for an event to be valid +- `or`, meaning that only one data filter returning `true` is enough for an event to be valid + +Any kind of error is considered as `false` (e.g. path not existing in event body). + +Such behaviour can be configured with `dataLogicalOperator` field in a Sensor dependency filters, e.g. + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: data-filters-example +spec: + dependencies: + - name: sample-dependency + eventSourceName: webhook + eventName: sample-event + filters: + dataLogicalOperator: "or" + data: + - path: "a" + type: "bool" + value: + - "true" + - path: "b.c" + type: "number" + value: + - "3.14" + - path: "b.d" + type: "string" + value: + - "hello there" + # ... +``` + +Available values: + +- `""` (empty), defaulting to `and` +- `and`, default behaviour +- `or` + +> ⚠️ `PLEASE NOTE` Data logical operator values must be `lower case`. + +## How it works + +### Comparator + +The data filter offers following `comparators`: + +- `>=` +- `>` +- `=` +- `!=` +- `<` +- `<=` + +e.g. + +```yaml +filters: + data: + - path: body.value + type: number + comparator: ">" + value: + - "50.0" +``` + +**Note**: + +- If data type is `string`, you can pass either an exact value or a regex. In any case that value will be evaluated as a regex. +- If data types is `bool` or `float`, you have to pass an exact value. + +### Multiple paths + +If the HTTP request was less simple and contained multiple paths that you would like to filter against, you can use [multipaths](https://github.com/tidwall/gjson/blob/master/SYNTAX.md#multipaths) to combine multiple data paths in the payload into one string. + +For a given payload such as: + +```json +{ + "body": { + "action":"opened", + "labels": [ + {"id":"1234", "name":"Webhook"}, + {"id":"5678", "name":"Approved"} + ] + } +} +``` + +We want our sensor to fire if the action is "opened" and it has a label of "Webhook" or if the action is "closed" and it has a label of "Webhook" and "Approved". + +The path would look like `body.action,body.labels.#(name=="Webhook").name,body.labels.#(name=="Approved").name` + +This would return a string like: `"opened","Webhook"` or `"closed","Webhook","Approved"`.\ + +As the resulting data type will be a `string`, we can pass a regex over it: + +```yaml +filters: + data: + - path: 'body.action,body.labels.#(name=="Webhook").name,body.labels.#(name=="Approved").name' + type: string + value: + - '"opened","Webhook"' + - '"closed","Webhook","Approved"' +``` + +### Template + +`template` process the incoming data defined in `path` through [sprig template](https://github.com/Masterminds/sprig) before matching with the `value`. + +e.g. + +```yaml +filters: + data: + - path: body.message + type: string + value: + - "hello world" + template: "{{ b64dec .Input }}" +``` + +The message `'{"message":"aGVsbG8gd29ybGQ="}'` will match with the above filter definition. + +**Note**: Data type is assumed to be string before applying the `template`, then cast to the user defined `type` for value matching. + +## Practical examples (comparator) + +1. Create a webhook event-source + + kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml + +1. 
Create a webhook sensor with data filter + + kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/filter-with-data-simple-1.yaml + +1. Send an HTTP request to event-source + + curl -d '{"message":"this is my first webhook"}' -H "Content-Type: application/json" -X POST http://localhost:12000/example + +1. You will notice in sensor logs that the event is invalid as it expects for either `hello` or `hey` as the value of `body.message` + +1. Send another HTTP request to event-source + + curl -d '{"message":"hello"}' -H "Content-Type: application/json" -X POST http://localhost:12000/example + +1. Look for a workflow with name starting with `data-workflow-` + +## Further examples + +You can find some examples [here](https://github.com/argoproj/argo-events/tree/master/examples/sensors). diff --git a/docs/sensors/filters/expr.md b/docs/sensors/filters/expr.md new file mode 100644 index 0000000000..b164dbb799 --- /dev/null +++ b/docs/sensors/filters/expr.md @@ -0,0 +1,130 @@ + +# Expr filter + +Expr filters are applied to the event data. A CloudEvent from Webhook event-source has payload structure as: + +```json +{ + "context": { + "type": "type_of_event_source", + "specversion": "cloud_events_version", + "source": "name_of_the_event_source", + "id": "unique_event_id", + "time": "event_time", + "datacontenttype": "type_of_data", + "subject": "name_of_the_configuration_within_event_source" + }, + "data": { + "header": {}, + "body": {}, + } +} +``` + +Expr filters are applied on `data` within the payload. + +## Fields + +An expr filter has following fields: + +```yaml +filters: + exprLogicalOperator: logical_operator_applied + exprs: + - expr: expression_to_evaluate + fields: + - name: parameter_name + path: path_to_parameter_value +``` + +> ⚠️ `PLEASE NOTE` order in which expr filters are declared corresponds to the order in which the Sensor will evaluate them. + +## Logical operator + +Expr filters can be evaluated together in 2 ways: + +- `and`, meaning that all expr filters returning `true` are required for an event to be valid +- `or`, meaning that only one expr filter returning `true` is enough for an event to be valid + +Any kind of error is considered as `false` (e.g. path not existing in event body). + +Such behaviour can be configured with `exprLogicalOperator` field in a Sensor dependency filters, e.g. + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: data-filters-example +spec: + dependencies: + - name: sample-dependency + eventSourceName: webhook + eventName: sample-event + filters: + exprLogicalOperator: "or" + exprs: + - expr: a == "b" || c != 10 + fields: + - name: a + path: a + - name: c + path: c + - expr: e == false + fields: + - name: e + path: d.e + # ... +``` + +Available values: + +- `""` (empty), defaulting to `and` +- `and`, default behaviour +- `or` + +> ⚠️ `PLEASE NOTE` Expr logical operator values must be `lower case`. + +## How it works + +The `expr` field defines the expression to be evaluated. The `fields` stanza defines `name` and `path` of each parameter used in the expression. + +`name` is arbitrary and used in the `expr`, `path` defines how to find the value in the data payload then to be assigned to a parameter. + +The expr filter evaluates the expression contained in `expr` using [govaluate](https://github.com/Knetic/govaluate). This library leverages an incredible flexibility and power. 
+ +With govaluate we are able to define complex combination of arithmetic (`-`, `*`, `/`, `**`, `%`), negation (`-`), inversion (`!`), bitwise not (`~`), logical (`&&`, `||`), ternary conditional (`?`, `:`) operators, +together with comparators (`>`, `<`, `>=`, `<=`), comma-separated arrays and custom functions. + +Here some examples: + +- `action =~ "start"` +- `action == "end" && started == true` +- `action =~ "start" || (started == true && instances == 2)` + +To discover all options offered by govaluate, take a look at its [manual](https://github.com/Knetic/govaluate/blob/master/MANUAL.md). + +## Practical example + +1. Create a webhook event-source + + kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml + +1. Create a webhook sensor with expr filter + + kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/filter-with-expressions.yaml + +1. Send an HTTP request to event-source + + curl -d '{ "a": "b", "c": 11, "d": { "e": true } }' -H "Content-Type: application/json" -X POST http://localhost:12000/example + +1. You will notice in sensor logs that the event is invalid as the sensor expects `e == false` + +1. Send another HTTP request to event-source + + curl -d '{ "a": "b", "c": 11, "d": { "e": false } }' -H "Content-Type: application/json" -X POST http://localhost:12000/example + +1. Look for a workflow with name starting with `expr-workflow-` + +## Further examples + +You can find some examples [here](https://github.com/argoproj/argo-events/tree/master/examples/sensors). diff --git a/docs/sensors/filters/intro.md b/docs/sensors/filters/intro.md new file mode 100644 index 0000000000..712573fa61 --- /dev/null +++ b/docs/sensors/filters/intro.md @@ -0,0 +1,57 @@ +# Introduction + +Filters provide a powerful mechanism to apply constraints on the events in order to determine a validity. + +If filters determine an event is valid, this will trigger the action defined by the Sensor. + +If filters determine an event is not valid, this won't trigger any action. + +## Types + +Argo Events offers 5 types of filters: + +1. [`Expr` Filter](expr.md) +1. [`Data` Filter](data.md) +1. [`Script` Filter](script.md) +1. [`Context` Filter](ctx.md) +1. [`Time` Filter](time.md) + +> ⚠️ `PLEASE NOTE` this is the order in which Sensor evaluates filter types: expr, data, context, time. + +## Logical operator + +Filter types can be evaluated together in 2 ways: + +- `and`, meaning that all filters returning `true` are required for an event to be valid +- `or`, meaning that only one filter returning `true` is enough for an event to be valid + +Any kind of filter error is considered as `false` (e.g. path not existing in event body). + +Such behaviour can be configured with `filtersLogicalOperator` field in a Sensor dependency, e.g. + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: multiple-filters-example +spec: + dependencies: + - name: sample-dependency + eventSourceName: webhook + eventName: sample-event + filtersLogicalOperator: "or" + filters: + # ... +``` + +Available values: + +- `""` (empty), defaulting to `and` +- `and`, default behaviour +- `or` + +> ⚠️ `PLEASE NOTE` Logical operator values must be `lower case`. + +## Examples + +You can find some examples [here](https://github.com/argoproj/argo-events/tree/master/examples/sensors). 
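As a hedged sketch of combining filter types (the filter values are illustrative, reusing the shapes from the Context and Data filter pages), the following dependency is considered valid when either its context filter or its data filter matches:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Sensor
metadata:
  name: multiple-filters-example
spec:
  dependencies:
    - name: sample-dependency
      eventSourceName: webhook
      eventName: sample-event
      filtersLogicalOperator: "or"   # either filter below is enough
      filters:
        context:
          source: custom-webhook     # context filter on the event source
        data:
          - path: body.message       # data filter on the event payload
            type: string
            value:
              - "hello"
```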
diff --git a/docs/sensors/filters/script.md b/docs/sensors/filters/script.md
new file mode 100644
index 0000000000..2974c0d7c3
--- /dev/null
+++ b/docs/sensors/filters/script.md
@@ -0,0 +1,56 @@
+# Script filter
+
+Script filters can be used to filter the events with [Lua](https://www.lua.org/) scripts.
+
+Script filters are applied to the event `data`. A CloudEvent from Webhook event-source has payload structure as:
+
+```json
+{
+  "context": {
+    "type": "type_of_event_source",
+    "specversion": "cloud_events_version",
+    "source": "name_of_the_event_source",
+    "id": "unique_event_id",
+    "time": "event_time",
+    "datacontenttype": "type_of_data",
+    "subject": "name_of_the_configuration_within_event_source"
+  },
+  "data": {
+    "header": {},
+    "body": {}
+  }
+}
+```
+
+## Fields
+
+A Script filter can be defined under `filters` with a field `script`:
+
+```yaml
+filters:
+  script: |-
+    if event.body.a == "b" and event.body.d.e == "z" then return true else return false end
+```
+
+## Practical example
+
+1. Create a webhook event-source
+
+        kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml
+
+1. Create a webhook sensor with script filter
+
+        kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/filter-script.yaml
+
+1. Send an HTTP request to the event-source
+
+        kubectl port-forward svc/webhook-eventsource-svc 12000
+        curl -d '{"hello": "world"}' -X POST http://localhost:12000/example
+
+1. You will notice in sensor logs that the event did not trigger anything.
+
+1. Send another HTTP request to the event-source
+
+        curl -X POST -d '{"a": "b", "d": {"e": "z"}}' http://localhost:12000/example
+
+1. Then you will see the event successfully triggered a workflow creation.
diff --git a/docs/sensors/filters/time.md b/docs/sensors/filters/time.md
new file mode 100644
index 0000000000..adaea0989a
--- /dev/null
+++ b/docs/sensors/filters/time.md
@@ -0,0 +1,102 @@
+
+# Time Filter
+
+Time filter is applied to the event time, contained in the event context. A CloudEvent from Webhook event-source has payload structure as:
+
+```json
+{
+  "context": {
+    "type": "type_of_event_source",
+    "specversion": "cloud_events_version",
+    "source": "name_of_the_event_source",
+    "id": "unique_event_id",
+    "time": "event_time",
+    "datacontenttype": "type_of_data",
+    "subject": "name_of_the_configuration_within_event_source"
+  },
+  "data": {
+    "header": {},
+    "body": {}
+  }
+}
+```
+
+It filters out events occurring outside the specified time range, so it is especially helpful when
+you need to make sure an event occurs within a certain time frame.
+
+## Fields
+
+Time filter has the following fields:
+
+```yaml
+filters:
+  time:
+    start: time_range_start_utc
+    stop: time_range_end_utc
+```
+
+## How it works
+
+Time filter takes a `start` and `stop` time in `HH:MM:SS` format in UTC.
+
+```yaml
+apiVersion: argoproj.io/v1alpha1
+kind: Sensor
+metadata:
+  name: with-time-filter
+spec:
+  template:
+    serviceAccountName: operate-workflow-sa
+  dependencies:
+    - name: test-dep
+      eventSourceName: webhook
+      eventName: example
+      filters:
+        time:
+          start: "02:30:00"
+          stop: "04:30:00"
+```
+
+If `stop` is smaller than `start` (`stop` < `start`), the stop time is treated as the next day of `start`.
+
+**Note**: `start` is inclusive while `stop` is exclusive.
+
+### Time filter behaviour visually explained
+
+1. if `start` < `stop`: event time must be in `[start, stop)`.
+ + 00:00:00 00:00:00 00:00:00 + ┃ start stop ┃ start stop ┃ + ─┸─────●───────────────────────○─────┸─────●───────────────────────○─────┸─ + ╰───────── OK ──────────╯ ╰───────── OK ──────────╯ + +1. if `stop` < `start`: event time must be in `[start, stop@Next day)` + (this is equivalent to: event time must be in `[00:00:00, stop) || [start, 00:00:00@Next day)`). + + 00:00:00 00:00:00 00:00:00 + ┃ stop start ┃ stop start ┃ + ─┸───────────○───────────●───────────┸───────────○───────────●───────────┸─ + ─── OK ──────╯ ╰───────── OK ──────────╯ ╰────── OK ─── + +## Practical example + +1. Create a webhook event-source + + kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml + +1. Create a webhook sensor with time filter + + kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/filter-with-time.yaml + +1. Send an HTTP request to event-source + + curl -d '{"message":"this is my first webhook"}' -H "Content-Type: application/json" -X POST http://localhost:12000/example + +1. You will notice one of following behaviours: + + - if you run this example between 02:30 and 04:30, the sensor logs the event is valid + - if you run this example outside time range between 02:30 and 04:30, the sensor logs the event is invalid + +## Further examples + +You can find some examples [here](https://github.com/argoproj/argo-events/tree/master/examples/sensors). diff --git a/docs/sensors/ha.md b/docs/sensors/ha.md index 9201a52f89..0e1a187174 100644 --- a/docs/sensors/ha.md +++ b/docs/sensors/ha.md @@ -9,5 +9,32 @@ elected to be active if the old one is gone. **Please DO NOT manually scale up the replicas, that might cause unexpected behaviors!** +## Kubernetes Leader Election + +By default, Argo Events will use NATS for the HA leader election except when +using a Kafka Eventbus, in which case a leader election is not required as a +Sensor that uses a Kafka EventBus is capable of horizontally scaling. If using +a different EventBus you can opt-in to a Kubernetes native leader election by +specifying the following annotation. +```yaml +annotations: + events.argoproj.io/leader-election: k8s +``` + +To use Kubernetes leader election the following RBAC rules need to be associated +with the Sensor ServiceAccount. +```yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: argo-events-leaderelection-role +rules: +- apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "create", "update"] +``` + +## More + Click [here](../dr_ha_recommendations.md) to learn more information about Argo Events DR/HA recommendations. diff --git a/docs/sensors/more-about-sensors-and-triggers.md b/docs/sensors/more-about-sensors-and-triggers.md index 904028d3a8..9e25098f1b 100644 --- a/docs/sensors/more-about-sensors-and-triggers.md +++ b/docs/sensors/more-about-sensors-and-triggers.md @@ -2,7 +2,7 @@ ## Multiple Dependencies -If there are mulitple dependencies defined in the `Sensor`, you can configure +If there are multiple dependencies defined in the `Sensor`, you can configure [Trigger Conditions](trigger-conditions.md) to determine what kind of situation could get the trigger executed. @@ -18,8 +18,8 @@ from each dependencies will be used to trigger the actions. ## Duplicate Dependencies -Due to technical reasons, same `eventSourceName` and `eventName` combo can not -be referenced twice in one `Sensor` object. 
For example, following dependency +Due to technical reasons when using the NATS Streaming bus, the same `eventSourceName` and `eventName` combo can not +be referenced twice in one `Sensor` object. For example, the following dependency definitions are not allowed. However, it can be referenced unlimited times in different `Sensor` objects, so if you do have similar requirements, use 2 `Sensor` objects instead. @@ -49,6 +49,8 @@ spec: - "50.0" ``` +Note that this is not an issue for the Jetstream bus, however. + ## Events Delivery Order Following statements are based on using `NATS Streaming` as the EventBus. @@ -60,12 +62,9 @@ acknowledge the second one before the first one is redelivered. ## Events Delivery Guarantee -`NATS Streaming` offers `at-least-once` delivery guarantee. In the `Sensor` -application, an in-memory cache is implemented to cache the events IDs delivered -in the last 5 minutes, this is used to make sure there won't be any duplicate -events delivered. - -Based on this, it is considered as `exact-once` delivery. +`NATS Streaming` offers `at-least-once` delivery guarantee. `Jetstream` has additional features that get closer to "exactly once". In addition, in the `Sensor` application, an in-memory cache is implemented to cache the events IDs delivered +in the last 5 minutes: this is used to make sure there won't be any duplicate +events delivered. Based on this, we are able to achieve 1) "exactly once" in almost all cases, with the exception of pods dying while processing messages, and 2) "at least once" in all cases. ## Trigger Retries @@ -111,3 +110,27 @@ spec: # Defaults to "1" jitter: 2 ``` + +## Trigger Rate Limit + +There's no rate limit for a trigger unless you configure the spec as following: + +```yaml +spec: + triggers: + - rateLimit: + # Second, Minute or Hour, defaults to Second + unit: Second + # Requests per unit + requestsPerUnit: 20 +``` + +## Revision History Limit + +Optionally, a `revisionHistoryLimit` may be configured in the spec as following: + +```yaml +spec: + # Optional + revisionHistoryLimit: 3 +``` diff --git a/docs/sensors/transform.md b/docs/sensors/transform.md new file mode 100644 index 0000000000..ca538693fe --- /dev/null +++ b/docs/sensors/transform.md @@ -0,0 +1,134 @@ +# Event Transformation + +> Available after v1.6.0 + +1. Lua Script: Executes user-defined Lua script to transform the event. + +2. JQ Command: Evaluates JQ command to transform the event. We use to evaluate JQ commands. + +### Note + +* If set, transformations are applied to the event before the filters are applied. + +* Either a Lua script or a JQ command can be used for the transformation, not both. + +* Only event data is available for the transformation and not the context. + +* The event is discarded if the transformation fails. 
+ +## Lua Script + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: webhook +spec: + template: + serviceAccountName: operate-workflow-sa + dependencies: + - name: test-dep + eventSourceName: webhook + eventName: example + transform: + script: |- + event.body.message='updated' + return event + triggers: + - template: + name: webhook-workflow-trigger + conditions: "test-dep" + k8s: + operation: create + source: + resource: + apiVersion: argoproj.io/v1alpha1 + kind: Workflow + metadata: + generateName: webhook- + spec: + entrypoint: whalesay + arguments: + parameters: + - name: message + # the value will get overridden by event payload from test-dep + value: hello world + templates: + - name: whalesay + inputs: + parameters: + - name: message + container: + image: docker/whalesay:latest + command: [cowsay] + args: ["{{inputs.parameters.message}}"] + parameters: + - src: + dependencyName: test-dep + dataKey: body + dest: spec.arguments.parameters.0.value +``` + +1. `transform.script` field defines the Lua script that gets executed when an event is received. + +2. The event data is available to Lua execution context via a global variable called `event`. + +3. The above script sets the value of `body.message` field within the event data to a new value called `updated` and returns the event. + +4. The type of the `event` variable is Table and the script must return a Table representing a valid JSON object. + +## JQ Command + +```yaml +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: webhook +spec: + template: + serviceAccountName: operate-workflow-sa + dependencies: + - name: test-dep + eventSourceName: webhook + eventName: example + transform: + jq: ".body.message *= 2" + triggers: + - template: + name: webhook-workflow-trigger-1 + conditions: "test-dep-foo" + k8s: + operation: create + source: + resource: + apiVersion: argoproj.io/v1alpha1 + kind: Workflow + metadata: + generateName: webhook- + spec: + entrypoint: whalesay + arguments: + parameters: + - name: message + # the value will get overridden by event payload from test-dep + value: hello world + templates: + - name: whalesay + inputs: + parameters: + - name: message + container: + image: docker/whalesay:latest + command: [cowsay] + args: ["{{inputs.parameters.message}}"] + parameters: + - src: + dependencyName: test-dep + dataKey: body + dest: spec.arguments.parameters.0.value +``` + +1. The above script applies a JQ command `.body.message *= 2` on the event data which appends the value of `.body.message` to itself and +return the event. + +2. The output of the transformation must be a valid JSON object. diff --git a/docs/sensors/trigger-conditions.md b/docs/sensors/trigger-conditions.md index 5fcec05dc2..d6d10e8a04 100644 --- a/docs/sensors/trigger-conditions.md +++ b/docs/sensors/trigger-conditions.md @@ -2,8 +2,7 @@ > v1.0 and after -`Conditions` is a new feature to replace `Circuit` and `Switch`. With -`conditions`, triggers can be executed based on different dependency conditions. +Triggers can be executed based on different dependency `conditions`. An example with `conditions`: @@ -21,8 +20,8 @@ spec: eventSourceName: webhook-a eventName: example02 - name: dep03 - eventSourceName: webhook-b - eventName: example03 + eventSourceName: webhook-b + eventName: example03 triggers: - template: conditions: "dep02" @@ -55,3 +54,24 @@ won't be executed until the expression resolves to true. 
The operators in
 
 If `conditions` is missing, the default conditions to execute the trigger is
 `&&` logic of all the defined dependencies.
+
+## Conditions Reset
+
+When multiple dependencies are defined for a trigger, the trigger won't be executed until the condition expression is resolved to `true`. Sometimes you might want to reset all the stakeholders of the conditions; `conditions reset` is the way to do it.
+
+For example, say your trigger has a condition `A && B`, and both `A` and `B` are expected to receive an event every day. One day, for some reason, `A` gets an event but `B` doesn't; then it ends up with today's `A` and tomorrow's `B` triggering an action, which might not be something you want. To avoid that, you can reset the conditions as follows:
+
+```yaml
+spec:
+  triggers:
+    - template:
+        conditions: "dep01 && dep02"
+        conditionsReset:
+          - byTime:
+              # Reset conditions at 23:59
+              cron: "59 23 * * *"
+              # Optional, defaults to UTC
+              # More info for timezone: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
+              timezone: America/Los_Angeles
+        name: trigger01
+```
diff --git a/docs/sensors/triggers/argo-workflow.md b/docs/sensors/triggers/argo-workflow.md
index b822fc416b..7c3cf1a9ec 100644
--- a/docs/sensors/triggers/argo-workflow.md
+++ b/docs/sensors/triggers/argo-workflow.md
@@ -1,6 +1,6 @@
 # Argo Workflow Trigger
 
-Argo workflow is K8s custom resource which help orchestrating parallel jobs on Kubernetes.
+Argo Workflow is a K8s custom resource which helps orchestrate parallel jobs on Kubernetes.

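Before walking through the setup, here is a hedged sketch of the `argoWorkflow` trigger template shape (the inline Workflow resource is illustrative; the complete example referenced at the end of this page is authoritative):

```yaml
triggers:
  - template:
      name: argo-workflow-trigger
      argoWorkflow:
        operation: submit            # see the CLI operations listed below
        source:
          resource:
            apiVersion: argoproj.io/v1alpha1
            kind: Workflow
            metadata:
              generateName: webhook-
            spec:
              entrypoint: whalesay
              templates:
                - name: whalesay
                  container:
                    image: docker/whalesay:latest
                    command: [cowsay]
                    args: ["triggered by an event"]
```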
@@ -14,17 +14,23 @@ Argo workflow is K8s custom resource which help orchestrating parallel jobs on K ## Trigger a workflow +Note: You will need to have [Argo Workflows](https://argoproj.github.io/argo-workflows/) installed to make this work. + 1. Make sure to have the eventbus deployed in the namespace. 1. We will use webhook event-source and sensor to trigger an Argo workflow. +1. Set up the `operate-workflow-sa` service account that the sensor will use + + kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/master/examples/rbac/sensor-rbac.yaml + 1. Let's set up a webhook event-source to process incoming requests. - + kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml 1. Create the sensor. - kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/webhook.yaml + kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/special-workflow-trigger-shortened.yaml 1. Let's expose the webhook event-source pod using `port-forward` so that we can make a request to it. @@ -45,27 +51,27 @@ You can learn more about trigger parameterization [here](https://argoproj.github ## Policy -Trigger policy helps you determine the status of the triggered Argo workflow object and decide whether to stop or continue sensor. +Trigger policy helps you determine the status of the triggered Argo workflow object and decide whether to stop or continue sensor. -Take a look at [K8s Trigger Policy](https://argoproj.github.io/argo-events/triggers/k8s-object-trigger/#policy). +Take a look at [K8s Trigger Policy](https://argoproj.github.io/argo-events/sensors/triggers/k8s-object-trigger/#policy). ## Argo CLI -Although the sensor defined above lets you trigger an Argo workflow, it doesn't have the ability to leverage the functionality +In addition to the example above, you can leverage other functionalities provided by the Argo CLI such as, 1. Submit -2. Resubmit -3. Resume -4. Retry -5. Suspend +1. Submit --from +1. Resubmit +1. Resume +1. Retry +1. Suspend +1. Terminate +1. Stop -To make use of Argo CLI operations, The sensor provides the `argoWorkflow` trigger template, +To make use of Argo CLI operations in `argoWorkflow` trigger template, argoWorkflow: - group: argoproj.io - version: v1alpha1 - resource: workflows - operation: submit # submit, resubmit, resume, retry, suspend or terminate + operation: submit # submit, submit-from, resubmit, resume, retry, suspend, terminate or stop Complete example is available [here](https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/special-workflow-trigger.yaml). diff --git a/docs/sensors/triggers/aws-lambda.md b/docs/sensors/triggers/aws-lambda.md index 43e034dd88..8f8e99988a 100644 --- a/docs/sensors/triggers/aws-lambda.md +++ b/docs/sensors/triggers/aws-lambda.md @@ -1,6 +1,6 @@ # AWS Lambda -AWS Lambda provides a tremendous value, but the event driven lambda invocation is limited to +AWS Lambda provides a tremendous value, but the event driven lambda invocation is limited to SNS, SQS and few other event sources. Argo Events makes it easy to integrate lambda with event sources that are not native to AWS. @@ -14,7 +14,6 @@ that are not native to AWS.

- ## Trigger A Simple Lambda 1. Make sure to have eventbus deployed in the namespace. @@ -42,12 +41,12 @@ that are not native to AWS. }; 1. Let's set up webhook event-source to invoke the lambda over http requests. - + kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml 1. Let's expose the webhook event-source using `port-forward` so that we can make a request to it. - kubectl -n argo-events port-forward 12000:12000 + kubectl -n argo-events port-forward 12000:12000 1. Deploy the webhook sensor with AWS Lambda trigger. @@ -55,7 +54,7 @@ that are not native to AWS. 1. Once the sensor pod is in running state, make a `curl` request to webhook event-source pod, - curl -d '{"name":"foo"}' -H "Content-Type: application/json" -X POST http://localhost:12000/example + curl -d '{"name":"foo"}' -H "Content-Type: application/json" -X POST http://localhost:12000/example 9. It will trigger the AWS Lambda function `hello`. Look at the CloudWatch logs to verify. @@ -66,7 +65,7 @@ The AWS Lambda trigger specification is available [here](https://github.com/argo ## Request Payload Invoking the AWS Lambda without a request payload would not be very useful. The lambda trigger within a sensor -is invoked when sensor receives an event from the eventbus. In order to construct a request payload based on the event data, sensor offers +is invoked when sensor receives an event from the eventbus. In order to construct a request payload based on the event data, sensor offers `payload` field as a part of the lambda trigger. Let's examine a lambda trigger, @@ -95,11 +94,12 @@ The `payload` declared above will generate a request payload like below, "name": "foo" // name field from event data } -The above payload will be passed in the request to invoke the AWS lambda. You can add however many number of `src` and `dest` under `payload`. +The above payload will be passed in the request to invoke the AWS lambda. You can add however many number of `src` and `dest` under `payload`. **Note**: Take a look at [Parameterization](https://argoproj.github.io/argo-events/tutorials/02-parameterization/) in order to understand how to extract particular key-value from event data. ## Parameterization + Similar to other type of triggers, sensor offers parameterization for the AWS Lambda trigger. Parameterization is specially useful when you want to define a generic trigger template in the sensor and populate values like function name, payload values on the fly. @@ -131,9 +131,10 @@ With `parameters` the sensor will replace the function name `hello` with the val You can learn more about trigger parameterization [here](https://argoproj.github.io/argo-events/tutorials/02-parameterization/). ## Policy -Trigger policy helps you determine the status of the lambda invocation and decide whether to stop or continue sensor. -To determine whether the lamda was successful or not, Lambda trigger provides a `Status` policy. +Trigger policy helps you determine the status of the lambda invocation and decide whether to stop or continue sensor. + +To determine whether the lambda was successful or not, Lambda trigger provides a `Status` policy. The `Status` holds a list of response statuses that are considered valid. awsLambda: @@ -157,4 +158,4 @@ The `Status` holds a list of response statuses that are considered valid. - 200 - 201 -The above lambda trigger will be treated successful only if its invocation returns with either 200 or 201 status. 
+The above lambda trigger will be treated successful only if its invocation returns with either 200 or 201 status.
diff --git a/docs/sensors/triggers/azure-service-bus.md b/docs/sensors/triggers/azure-service-bus.md
new file mode 100644
index 0000000000..6452d98e0a
--- /dev/null
+++ b/docs/sensors/triggers/azure-service-bus.md
@@ -0,0 +1,51 @@
+# Azure Service Bus
+
+Service Bus Trigger allows a sensor to send messages to Azure Service Bus queues and topics.
+
+## Specification
+
+The Azure Service Bus trigger specification is available [here](https://github.com/argoproj/argo-events/blob/master/api/sensor.md#azureservicebustrigger).
+
+## Setup
+
+1. Create a queue called `test` either using Azure CLI or the Azure Service Bus management console.
+
+1. Fetch your connection string for Azure Service Bus and base64 encode it.
+
+1. Create a secret called `azure-secret` as follows.
+
+        apiVersion: v1
+        kind: Secret
+        metadata:
+          name: azure-secret
+        type: Opaque
+        data:
+          connectionstring: 
+
+1. Deploy the secret.
+
+        kubectl -n argo-events apply -f azure-secret.yaml
+
+1. Let's set up a webhook event-source to process incoming requests.
+
+        kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml
+
+1. Create a sensor by running the following command.
+
+        kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/azure-service-bus-sensor.yaml
+
+1. The Service Bus message needs a body. In order to construct a message based on your event data, the Azure Service Bus sensor has the payload field as part of the trigger.
+
+   The payload declared in the sensor will generate a message body like below,
+
+        {
+          "message": "some message here" // name/key of the object
+        }
+
+1. Let's expose the webhook event-source pod using port-forward so that we can make a request to it.
+
+        kubectl -n argo-events port-forward 12000:12000
+
+1. Use either Curl or Postman to send a post request to http://localhost:12000/example.
+
+        curl -d '{"message":"ok"}' -H "Content-Type: application/json" -X POST http://localhost:12000/example
diff --git a/docs/sensors/triggers/build-your-own-trigger.md b/docs/sensors/triggers/build-your-own-trigger.md
index 15f11fc673..da4ab9fe50 100644
--- a/docs/sensors/triggers/build-your-own-trigger.md
+++ b/docs/sensors/triggers/build-your-own-trigger.md
@@ -5,7 +5,7 @@ TektonCD or AirFlow pipelines on GitHub events.
 
 ## Custom Trigger
 
-In order to plug your own implementation for a trigger with Argo Events Sensor, you need to 
+In order to plug your own implementation for a trigger with Argo Events Sensor, you need to
 run a gRPC server that implements the interface that the sensor expects.
 
 ### Interface
@@ -36,7 +36,6 @@ Let's walk through the contract,
 
 3. `ApplyPolicy`: This is where your trigger implementation can check whether the triggered resource transitioned into the success state. Depending upon the response from the trigger server, the sensor will either stop processing subsequent triggers, or it will continue to process them.
 
-
 ### How to define the Custom Trigger in a sensor?
 
@@ -91,7 +90,7 @@ The most important fields are,
 
 1. `parameters`: The parameters override the resource that is fetched by the trigger server. Read more info on parameters [here](https://argoproj.github.io/argo-events/tutorials/02-parameterization/).
 
-1. `payload`: Payload to send to the trigger server.
Read more on payload [here](https://argoproj.github.io/argo-events/triggers/http-trigger/#request-payload).
+1. `payload`: Payload to send to the trigger server. Read more on payload [here](https://argoproj.github.io/argo-events/sensors/triggers/http-trigger/#request-payload).
 
 The complete spec for the custom trigger is available [here](https://github.com/argoproj/argo-events/blob/master/api/sensor.md#customtrigger).
diff --git a/docs/sensors/triggers/email-trigger.md b/docs/sensors/triggers/email-trigger.md
new file mode 100644
index 0000000000..a53de6689c
--- /dev/null
+++ b/docs/sensors/triggers/email-trigger.md
@@ -0,0 +1,104 @@
+# Email Trigger
+
+The Email trigger is used to send a custom email to a desired set of email addresses using an SMTP server. The intended use is notifications from a build pipeline, but it can be used for any notification scenario.
+
+## Prerequisite
+
+1. Deploy the eventbus in the namespace.
+
+2. Have an SMTP server set up.
+
+3. Create a kubernetes secret with the SMTP password in your cluster.
+
+        kubectl create secret generic smtp-secret --from-literal=password=$SMTP_PASSWORD
+
+   **Note**: If your SMTP server does not require authentication, this step can be skipped.
+
+4. Create a webhook event-source.
+
+        kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/event-sources/webhook.yaml
+
+5. Set up port-forwarding to expose the http server.
+
+        kubectl port-forward -n argo-events 12000:12000
+
+## Email Trigger
+
+Let's say we want to send an email to a dynamic recipient using a custom email body template.
+
+The custom email body template we are going to use is the following:
+```
+Hi ,
+  Hello There
+
+Thanks,
+Obi
+```
+where the name has to be substituted with the receiver name from the event.
+
+1. Create a sensor with Email trigger.
+
+        kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/email-trigger.yaml
+
+   **Note**: Please update ```email.port```, ```email.host``` and ```email.username``` to that of your SMTP server.
+   If your SMTP server does not require authentication, the ```email.username``` and ```email.smtpPassword``` should be omitted.
+
+2. Send an HTTP request to the event-source pod to fire the Email trigger.
+
+        curl -d '{"name":"Luke", "to":"your@email.com"}' -H "Content-Type: application/json" -X POST http://localhost:12000/example
+
+   **Note**: You can modify the value for key ```"to"``` to send the email to your address.
+
+3. Alternatively, you can skip providing the ```"to"``` in the payload to send an email to the static email addresses provided in the trigger.
+
+        curl -d '{"name":"Luke"}' -H "Content-Type: application/json" -X POST http://localhost:12000/example
+
+   **Note**: You have to remove the parameterization for ```email.to.0``` and add ```email.to``` like so:
+   ```yaml
+   email:
+     ...
+     to:
+       - target1@email.com
+       - target2@email.com
+   ...
+   ```
+
+## Parameterization
+
+We can parameterize the to, from, subject and body of the email trigger for dynamic capabilities.
+ +The email trigger parameters have the following structure, + + - parameters: + - src: + dependencyName: test-dep + dataKey: body.to + dest: email.to.0 + - src: + dependencyName: test-dep + dataKey: body.to + dest: email.to.-1 + - src: + dependencyName: test-dep + dataKey: body.from + dest: email.from + - src: + dependencyName: test-dep + dataKey: body.subject + dest: email.subject + - src: + dependencyName: test-dep + dataKey: body.emailBody + dest: email.body + + +- ```email.to.index``` can be used to overwite an email address already specified in the trigger at the provided index. (where index is an integer) +- ```email.to.-1``` can be used to append a new email address to the addresses to which an email will be sent. +- ```email.from``` can be used to specify the from address of the email sent. +- ```email.body``` can be used to specify the body of the email which will be sent. +- ```email.subject``` can be used to specify the subject of the email which will be sent. + +To understand more on parameterization, take a look at [this tutorial](https://argoproj.github.io/argo-events/tutorials/02-parameterization/). + +The complete specification of Email trigger is available [here](https://github.com/argoproj/argo-events/blob/master/api/sensor.md#emailtrigger). diff --git a/docs/sensors/triggers/http-trigger.md b/docs/sensors/triggers/http-trigger.md index 6f1aa76a3d..cf2ff11dc6 100644 --- a/docs/sensors/triggers/http-trigger.md +++ b/docs/sensors/triggers/http-trigger.md @@ -1,6 +1,6 @@ # HTTP Trigger -Argo Events offers HTTP trigger which can easily invoke serverless functions like OpenFaas, Kubeless, Knative, Nuclio and make REST API calls. +Argo Events offers HTTP trigger which can easily invoke serverless functions like OpenFaaS, Kubeless, Knative, Nuclio and make REST API calls.

@@ -13,6 +13,7 @@ Argo Events offers HTTP trigger which can easily invoke serverless functions lik
## Specification + The HTTP trigger specification is available [here](https://github.com/argoproj/argo-events/blob/master/api/sensor.md#httptrigger). ## REST API Calls @@ -21,33 +22,32 @@ Consider a scenario where your REST API server needs to consume events from even the integration yourself in the server code, although server logic has nothing to do any of the event-sources. This is where Argo Events HTTP trigger can help. The HTTP trigger takes the task of consuming events from event-sources away from API server and seamlessly integrates these events via REST API calls. - -We will set up a basic go http server and connect it with the minio events. +We will set up a basic go http server and connect it with the Minio events. 1. The HTTP server simply prints the request body as follows. package main - + import ( - "fmt" - "io/ioutil" - "net/http" + "fmt" + "io" + "net/http" ) - + func hello(w http.ResponseWriter, req *http.Request) { - body, err := ioutil.ReadAll(req.Body) - if err != nil { - fmt.Printf("%+v\n", err) - return - } - fmt.Println(string(body)) - fmt.Fprintf(w, "hello\n") + body, err := io.ReadAll(req.Body) + if err != nil { + fmt.Printf("%+v\n", err) + return + } + fmt.Println(string(body)) + fmt.Fprintf(w, "hello\n") } - + func main() { - http.HandleFunc("/hello", hello) - fmt.Println("server is listening on 8090") - http.ListenAndServe(":8090", nil) + http.HandleFunc("/hello", hello) + fmt.Println("server is listening on 8090") + http.ListenAndServe(":8090", nil) } 2. Deploy the HTTP server. @@ -63,8 +63,8 @@ We will set up a basic go http server and connect it with the minio events. kubectl -n argo-events port-forward 8090:8090 5. Our goals is to seamlessly integrate Minio S3 bucket notifications with REST API server created in previous step. So, - lets set up the Minio event-source available [here](https://argoproj.github.io/argo-events/setup/minio/). - Don't create the sensor as we will be deploying it in next step. + lets set up the Minio event-source available [here](https://argoproj.github.io/argo-events/setup/minio/). + Don't create the sensor as we will be deploying it in next step. 6. Create a sensor as follows. @@ -72,7 +72,6 @@ We will set up a basic go http server and connect it with the minio events. 7. Now, drop a file onto `input` bucket in Minio server. - 8. The sensor has triggered a http request to the http server. Take a look at the logs. server is listening on 8090 @@ -82,7 +81,7 @@ We will set up a basic go http server and connect it with the minio events. ### Request Payload -In order to construct a request payload based on the event data, sensor offers +In order to construct a request payload based on the event data, sensor offers `payload` field as a part of the HTTP trigger. Let's examine a HTTP trigger, @@ -109,7 +108,7 @@ The `payload` declared above will generate a request payload like below, "bucket": "bucket name from event data" } -The above payload will be passed in the HTTP request. You can add however many number of `src` and `dest` under `payload`. +The above payload will be passed in the HTTP request. You can add however many number of `src` and `dest` under `payload`. **Note**: Take a look at [Parameterization](https://argoproj.github.io/argo-events/tutorials/02-parameterization/) in order to understand how to extract particular key-value from event data. 
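Putting those pieces together, here is a hedged sketch of an HTTP trigger that builds its payload from a Minio notification (the URL, dependency name, and data keys are illustrative assumptions, not the exact example sensor):

```yaml
triggers:
  - template:
      name: http-trigger
      http:
        url: http://http-server.argo-events.svc:8090/hello
        method: POST
        payload:
          - src:
              dependencyName: test-dep
              dataKey: notification.0.s3.object.key    # assumed Minio event path
            dest: fileName
          - src:
              dependencyName: test-dep
              dataKey: notification.0.s3.bucket.name   # assumed Minio event path
            dest: bucket
```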
@@ -122,7 +121,8 @@ you want to define a generic trigger template in the sensor and populate values
You can learn more about trigger parameterization [here](https://argoproj.github.io/argo-events/tutorials/02-parameterization/).

### Policy
-Trigger policy helps you determine the status of the HTTP request and decide whether to stop or continue sensor.
+
+Trigger policy helps you determine the status of the HTTP request and decide whether to stop or continue the sensor.

To determine whether the HTTP request was successful or not, the HTTP trigger provides a `Status` policy.
The `Status` holds a list of response statuses that are considered valid.
@@ -148,39 +148,37 @@ The `Status` holds a list of response statuses that are considered valid.

            - 200
            - 201

-The above HTTP trigger will be treated successful only if the HTTP request returns with either 200 or 201 status.
+The above HTTP trigger will be treated as successful only if the HTTP request returns with either 200 or 201 status.

-## OpenFaas
+## OpenFaaS

-OpenFaas offers a simple way to spin up serverless functions. Lets see how we can leverage Argo Events HTTP trigger
-to invoke OpenFaas function.
+OpenFaaS offers a simple way to spin up serverless functions. Let's see how we can leverage the Argo Events HTTP trigger
+to invoke an OpenFaaS function.

-1. If you don't have OpenFaas installed, follow the [instructions](https://docs.openfaas.com/deployment/kubernetes/).
+1. If you don't have OpenFaaS installed, follow the [instructions](https://docs.openfaas.com/deployment/kubernetes/).

2. Let's create a basic function. You can follow the [steps](https://blog.alexellis.io/serverless-golang-with-openfaas/).
-   to set up the function.
-
-
-        package function
-
-        import (
-            "fmt"
-        )
-
-        // Handle a serverless request
-        func Handle(req []byte) string {
-            return fmt.Sprintf("Hello, Go. You said: %s", string(req))
-        }
+   to set up the function.
+
+        package function
+        import (
+            "fmt"
+        )
+
+        // Handle a serverless request
+        func Handle(req []byte) string {
+            return fmt.Sprintf("Hello, Go. You said: %s", string(req))
+        }

3. Make sure the function pod is up and running.

-4. We are going to invoke OpenFaas function on a message on Redis Subscriber.
+4. We are going to invoke the OpenFaaS function on a message on the Redis subscriber.

5. Let's set up the Redis Database, Redis PubSub event-source as specified [here](https://argoproj.github.io/argo-events/setup/redis/).
-   Do not create the Redis sensor, we are going to create it in next step.
+   Do not create the Redis sensor, we are going to create it in the next step.

-6. Let's create the sensor with OpenFaas trigger.
+6. Let's create the sensor with the OpenFaaS trigger.

        apiVersion: argoproj.io/v1alpha1
        kind: Sensor
@@ -206,7 +204,7 @@ to invoke OpenFaas function.

        PUBLISH FOO hello

-8. As soon as you publish the message, the sensor will invoke the OpenFaas function `gohash`.
+8. As soon as you publish the message, the sensor will invoke the OpenFaaS function `gohash`.

## Kubeless

@@ -225,8 +223,8 @@ Similar to REST API calls, you can easily invoke Kubeless functions using HTTP t

4. Now, we are going to invoke the Kubeless function when a message is placed on a NATS queue.

5. Let's set up the NATS event-source. Follow [instructions](https://argoproj.github.io/argo-events/setup/nats/#setup) for details.
-   Do not create the NATS sensor, we are going to create it in next step.
-
+   Do not create the NATS sensor, we are going to create it in the next step.
+
6. Let's create a NATS sensor with an HTTP trigger.
        apiVersion: argoproj.io/v1alpha1
@@ -259,9 +257,9 @@ Similar to REST API calls, you can easily invoke Kubeless functions using HTTP t

        go run main.go -s localhost foo '{"first_name": "foo", "last_name": "bar"}'

8. It will invoke the Kubeless function `hello`.
-
+
        {'event-time': None, 'extensions': {'request': }, 'event-type': None, 'event-namespace': None, 'data': '{"first_name":"foo","last_name":"bar"}', 'event-id': None}

# Other serverless frameworks

-Similar to OpenFaas and Kubeless invocation demonstrated above, you can easily trigger KNative, Nuclio, Fission functions using HTTP trigger.
+Similar to the OpenFaaS and Kubeless invocations demonstrated above, you can easily trigger Knative, Nuclio, and Fission functions using the HTTP trigger.
diff --git a/docs/sensors/triggers/k8s-object-trigger.md b/docs/sensors/triggers/k8s-object-trigger.md
index 419f736926..09885614a8 100644
--- a/docs/sensors/triggers/k8s-object-trigger.md
+++ b/docs/sensors/triggers/k8s-object-trigger.md
@@ -1,7 +1,8 @@
 # Kubernetes Object Trigger
 
-Apart from Argo workflow objects, the sensor lets you trigger standard Kubernetes objects such as Pod, Deployment, Job, CronJob, etc.
-Having the ability to trigger standard Kubernetes objects is quite powerful as provides an avenue to
+Apart from Argo workflow objects, the sensor lets you trigger any Kubernetes object, including Custom Resources,
+such as Pod, Deployment, Job, CronJob, etc.
+Having the ability to trigger Kubernetes objects is quite powerful as it provides an avenue to
 set up event-driven pipelines for existing workloads.
@@ -39,9 +40,6 @@ set up event-driven pipelines for existing workloads.
        - template:
            name: webhook-pod-trigger
            k8s:
-             group: ""
-             version: v1
-             resource: pods
              operation: create
              source:
                resource:
@@ -62,9 +60,6 @@ set up event-driven pipelines for existing workloads.
                  dependencyName: test-dep
                  dest: spec.containers.0.args.0

-1. The `group`, `version` and `resource` under `k8s` in the trigger template determines the type of
-   K8s object. Change it accordingly if you want to trigger something else than a pod.
-
1. Create the sensor.

        kubectl apply -n argo-events -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/trigger-standard-k8s-resource.yaml
@@ -76,8 +71,8 @@ set up event-driven pipelines for existing workloads.

1. Use either Curl or Postman to send a post request to the `http://localhost:12000/example`.

        curl -d '{"message":"ok"}' -H "Content-Type: application/json" -X POST http://localhost:12000/example
-
-1. Inspect the logs of the pod, you will something similar as below.
+
+1. After the pod has completed, inspect its logs; you will see something similar to the below.

     _________________________________________
    / {"context":{"type":"webhook","specVersi \
    | on":"0.3","source":"webhook","e         |
@@ -105,7 +100,6 @@ set up event-driven pipelines for existing workloads.
                 \    \        __/
                  \____\______/
-
## Operation

You can specify the operation for the trigger using the `operation` key under triggers->template->k8s.
@@ -115,6 +109,7 @@ Operation can be either.

1. `create`: Creates the object if not available in K8s cluster.
2. `update`: Updates the object.
3. `patch`: Patches the object using given patch strategy.
+4. `delete`: Deletes the object if it exists.

More info is available [here](https://github.com/argoproj/argo-events/blob/master/api/sensor.md#argoproj.io/v1alpha1.StandardK8sTrigger).
@@ -127,7 +122,7 @@ You can learn more about trigger parameterization [here](https://argoproj.github

## Policy

-Trigger policy helps you determine the status of the triggered K8s object and decide whether to stop or continue sensor.
+Trigger policy helps you determine the status of the triggered K8s object and decide whether to stop or continue the sensor.

To determine whether the K8s object was successful or not, the K8s trigger provides a `Resource Labels` policy.
The `Resource Labels` holds a list of labels which are checked against the triggered K8s object to determine the status of the object.
@@ -153,4 +148,4 @@ The `Resource Labels` holds a list of labels which are checked against the trigg

            # defaults to false
            errorOnBackoffTimeout: true

-Complete example is available [here](https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/trigger-with-policy.yaml).
+Complete example is available [here](https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/trigger-with-policy.yaml).
diff --git a/docs/sensors/triggers/kafka-trigger.md b/docs/sensors/triggers/kafka-trigger.md
index 7be5d565fd..8dd3f45217 100644
--- a/docs/sensors/triggers/kafka-trigger.md
+++ b/docs/sensors/triggers/kafka-trigger.md
@@ -10,9 +10,9 @@ The Kafka trigger specification is available [here](https://github.com/argoproj/

1. Consider a scenario where you are expecting a file drop onto a Minio bucket and want to place that event
   on a Kafka topic.

-1. Set up the Minio Event Source [here](https://argoproj.github.io/argo-events/setup/minio/).
+1. Set up the Minio Event Source [here](https://argoproj.github.io/argo-events/setup/minio/).
   Do not create the Minio sensor, we are going to create it in the next step.
-
+
1. Let's create the sensor.

        apiVersion: argoproj.io/v1alpha1
@@ -44,7 +44,7 @@ The Kafka trigger specification is available [here](https://github.com/argoproj/
               dataKey: notification.0.s3.bucket.name
               dest: bucket

-1. The Kafka message needs a body. In order to construct message based on the event data, sensor offers
+1. The Kafka message needs a body. In order to construct a message based on the event data, the sensor offers
   `payload` field as a part of the Kafka trigger.

   The `payload` contains the list of `src` which refers to the source event and `dest` which refers to destination key within result request payload.
diff --git a/docs/sensors/triggers/nats-trigger.md b/docs/sensors/triggers/nats-trigger.md
index 3fee66ed5d..e0e4eb129c 100644
--- a/docs/sensors/triggers/nats-trigger.md
+++ b/docs/sensors/triggers/nats-trigger.md
@@ -3,6 +3,7 @@
NATS trigger allows sensor to publish events on NATS subjects. This trigger helps source the events from outside world into your messaging queues.

## Specification
+
The NATS trigger specification is available [here](https://github.com/argoproj/argo-events/blob/master/api/sensor.md#natstrigger).

## Walkthrough
@@ -10,9 +11,9 @@ The NATS trigger specification is available [here](https://github.com/argoproj/a

1. Consider a scenario where you are expecting a file drop onto a Minio bucket and want to place that event
   on a NATS subject.

-1. Set up the Minio Event Source [here](https://argoproj.github.io/argo-events/setup/minio/).
+1. Set up the Minio Event Source [here](https://argoproj.github.io/argo-events/eventsources/setup/minio/).
   Do not create the Minio sensor, we are going to create it in the next step.
-
+
1. Let's create the sensor.

        apiVersion: argoproj.io/v1alpha1
@@ -42,7 +43,7 @@ The NATS trigger specification is available [here](https://github.com/argoproj/a
               dataKey: notification.0.s3.bucket.name
               dest: bucket

-1. The NATS message needs a body. In order to construct message based on the event data, sensor offers
+1. The NATS message needs a body. In order to construct a message based on the event data, the sensor offers
   `payload` field as a part of the NATS trigger.

   The `payload` contains the list of `src` which refers to the source event and `dest` which refers to destination key within result request payload.
@@ -57,11 +58,11 @@ The NATS trigger specification is available [here](https://github.com/argoproj/a

1. If you are running NATS on local K8s cluster, make sure to `port-forward` to pod.

        kubectl -n argo-events port-forward <pod-name> 4222:4222
-
-1. Subscribe to the subject called `minio-events`. Refer the nats example to publish a message to the subject https://github.com/nats-io/go-nats-examples/tree/master/patterns/publish-subscribe.
-
+
+1. Subscribe to the subject called `minio-events`. Refer to the [NATS example](https://github.com/nats-io/go-nats-examples/tree/master/patterns/publish-subscribe) to publish a message to the subject.
+
        go run main.go -s localhost minio-events

1. Drop a file called `hello.txt` onto the bucket `input` and you will receive the message on NATS subscriber as follows.
-
+
        [#1] Received on [minio-events]: '{"bucket":"input","fileName":"hello.txt"}'
diff --git a/docs/sensors/triggers/pulsar-trigger.md b/docs/sensors/triggers/pulsar-trigger.md
new file mode 100644
index 0000000000..3b3df130e6
--- /dev/null
+++ b/docs/sensors/triggers/pulsar-trigger.md
@@ -0,0 +1,57 @@
+# Pulsar Trigger
+
+The Pulsar trigger allows a sensor to publish events on a Pulsar topic. This trigger helps source events from the outside world into your messaging queues.
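Once the walkthrough below has published a message, you can verify it arrived independently of Argo Events. Here is a minimal consumer sketch, assuming the `github.com/apache/pulsar-client-go` client and the URL/topic used by the example sensor on this page; it is illustrative only and not part of Argo Events:

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/apache/pulsar-client-go/pulsar"
    )

    func main() {
    	client, err := pulsar.NewClient(pulsar.ClientOptions{
    		URL: "pulsar://pulsar.argo-events.svc:6650", // same URL as the sensor below
    	})
    	if err != nil {
    		panic(err)
    	}
    	defer client.Close()

    	consumer, err := client.Subscribe(pulsar.ConsumerOptions{
    		Topic:            "minio-events",
    		SubscriptionName: "verify-trigger",
    	})
    	if err != nil {
    		panic(err)
    	}
    	defer consumer.Close()

    	// Blocks until the sensor publishes a message to the topic.
    	msg, err := consumer.Receive(context.Background())
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(string(msg.Payload()))
    	consumer.Ack(msg)
    }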
+
+## Specification
+
+The Pulsar trigger specification is available [here](https://github.com/argoproj/argo-events/blob/master/api/sensor.md#pulsartrigger).
+
+## Walkthrough
+
+1. Consider a scenario where you are expecting a file drop onto a Minio bucket and want to place that event on a Pulsar topic.
+
+1. Set up the Minio Event Source [here](https://argoproj.github.io/argo-events/setup/minio/).
+   Do not create the Minio sensor, we are going to create it in the next step.
+
+1. Let's create the sensor.
+
+        apiVersion: argoproj.io/v1alpha1
+        kind: Sensor
+        metadata:
+          name: minio-sensor
+        spec:
+          dependencies:
+            - name: test-dep
+              eventSourceName: minio
+              eventName: example
+          triggers:
+            - template:
+                name: pulsar-trigger
+                pulsar:
+                  # Pulsar URL
+                  url: pulsar://pulsar.argo-events.svc:6650
+                  # Name of the topic
+                  topic: minio-events
+                  payload:
+                    - src:
+                        dependencyName: test-dep
+                        dataKey: notification.0.s3.object.key
+                      dest: fileName
+                    - src:
+                        dependencyName: test-dep
+                        dataKey: notification.0.s3.bucket.name
+                      dest: bucket
+
+1. The Pulsar message needs a body. In order to construct a message based on the event data, the sensor offers a
+   `payload` field as a part of the Pulsar trigger.
+
+   The `payload` contains the list of `src` which refers to the source event and `dest` which refers to the destination key within the resulting request payload.
+
+   The `payload` declared above will generate a message body like below.
+
+        {
+          "fileName": "hello.txt", // name/key of the object
+          "bucket": "input" // name of the bucket
+        }
+
+1. Drop a file called `hello.txt` onto the bucket `input` and you will receive the message on the Pulsar topic.
diff --git a/docs/sensors/triggers/slack-trigger.md b/docs/sensors/triggers/slack-trigger.md
index ba1ee654fe..28ce06eca3 100644
--- a/docs/sensors/triggers/slack-trigger.md
+++ b/docs/sensors/triggers/slack-trigger.md
@@ -1,8 +1,9 @@
 # Slack Trigger
 
-The Slack trigger is used to send a custom message to a desired Slack channel in a Slack workspace. The intended use is for notifications for a build pipeline, but can be used for any notification scenario.
+The Slack trigger is used to send a custom message to a desired Slack channel in a Slack workspace. The intended use is for notifications for a build pipeline, but it can be used for any notification scenario.
 
 ## Prerequisite
+
 1. Deploy the eventbus in the namespace.
 
 1. Make sure to have a Slack workspace set up that you wish to send a message to.
@@ -13,10 +14,11 @@ The Slack trigger is used to send a custom message to a desired Slack channel in
 
 3. Set up port-forwarding to expose the http server. We will use port-forwarding here.
 
-
+        kubectl port-forward -n argo-events <event-source-pod-name> 12000:12000
 
 ## Create a Slack App
+
 We need to create a Slack App which will send messages to your Slack Workspace. We will add OAuth Permissions and add the OAuth token to the k8s cluster via a secret.
 
 1. Create a Slack app by clicking `Create New App` at the [Slack API Page](https://api.slack.com/apps). Name your app and choose your intended Slack Workspace.
@@ -29,43 +31,32 @@ We need to create a Slack App which will send messages to your Slack Workspace.
 
 5. You should land back on the `OAuth & Permissions` page. Copy your app's OAuth Access Token. This will allow the trigger to act on behalf of your newly created Slack app.
-6. Encode your OAuth token in base64. This can be done easily with the command line.
-
-        echo -n "YOUR-OAUTH-TOKEN" | base64
-
-7. Create a kubernetes secret file `slack-secret.yaml` with your OAuth token in the following format.
-
-        apiVersion: v1
-        kind: Secret
-        metadata:
-          name: slack-secret
-        data:
-          token: YOUR-BASE64-ENCODED-OAUTH-TOKEN
+6. Create a Kubernetes secret with the OAuth token in your cluster.

-12. Apply the kubernetes secret.
-
-        kubectl -n argo-events apply -f slack-secret.yaml
+        kubectl create secret generic slack-secret --from-literal=token=$SLACK_OAUTH_TOKEN

## Slack Trigger
-We will set up a basic slack trigger and send a default message, and then a dynamic custom message.
+
+We will set up a basic Slack trigger and send a default message, and then a dynamic custom message.

1. Create a sensor with Slack trigger. We will discuss the trigger details in the following sections.

        kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/sensors/slack-trigger.yaml

-2. Send a http request to the event-source-pod to fire the Slack trigger.
+2. Send an HTTP request to the event-source pod to fire the Slack trigger.

        curl -d '{"text":"Hello, World!"}' -H "Content-Type: application/json" -X POST http://localhost:12000/example
-
+
**Note**: The default slack-trigger will send the message "hello world" to the #general channel. You may change the default message and channel in slack-trigger.yaml under triggers.slack.channel and triggers.slack.message.

-3. Alternatively, you can dynamically determine the channel and message based on parameterization of your event.
+3. Alternatively, you can dynamically determine the channel and message based on parameterization of your event.

        curl -d '{"channel":"random","message":"test message"}' -H "Content-Type: application/json" -X POST http://localhost:12000/example

4. Great! But, how did the sensor use the event to customize the message and channel from the HTTP request? We will see that in the next section.

## Parameterization
+
The Slack trigger parameters have the following structure,

        parameters:
@@ -86,18 +77,142 @@ The `src` is the source of event. It contains,

The `dest` is the destination key within the result payload.

-So, the above trigger paramters will generate a request payload as,
+So, the above trigger parameters will generate a request payload as,

        {
          "channel": "channel_to_send_message",
          "message": "message_to_send_to_channel"
        }
-
-**_Note_**: If you define both the `contextKey` and `dataKey` within a paramter item, then
+**_Note_**: If you define both the `contextKey` and `dataKey` within a parameter item, then
the `dataKey` takes the precedence.

-You can create any paramater structure you want. To get more info on how to
+You can create any parameter structure you want. To get more info on how to
generate complex event payloads, take a look at [this library](https://github.com/tidwall/sjson).

+## Other Capabilities
+
+#### Configuring the sender of the Slack message:
+
+    - template:
+        name: slack-trigger
+        slack:
+          sender:
+            username: "Cool Robot"
+            icon: ":robot_face:" # emoji or url, e.g. https://example.com/image.png
+
+#### Sending messages to Slack threads:
+
+    - template:
+        name: slack-trigger
+        slack:
+          thread:
+            messageAggregationKey: "abcdefg" # aggregate message by some key to send them to the same Slack thread
+            broadcastMessageToChannel: true # also broadcast the message from the thread to the channel
+
+#### Sending attachments using [Slack Attachments API](https://api.slack.com/reference/messaging/attachments):
+
+    - template:
+        name: slack-trigger
+        slack:
+          message: "hello world!"
+ attachments: | + [{ + "title": "Attachment1!", + "title_link": "https://argoproj.github.io/argo-events/sensors/triggers/slack-trigger/", + "color": "#18be52", + "fields": [{ + "title": "Hello1", + "value": "Hello World1", + "short": true + }, { + "title": "Hello2", + "value": "Hello World2", + "short": true + }] + }, { + "title": "Attachment2!", + "title_link": "https://argoproj.github.io/argo-events/sensors/triggers/slack-trigger/", + "color": "#18be52", + "fields": [{ + "title": "Hello1", + "value": "Hello World1", + "short": true + }, { + "title": "Hello2", + "value": "Hello World2", + "short": true + }] + }] + +#### Sending blocks using [Slack Blocks API](https://api.slack.com/reference/block-kit/blocks): + + - template: + name: slack-trigger + slack: + blocks: | + [{ + "type": "actions", + "block_id": "actionblock789", + "elements": [{ + "type": "datepicker", + "action_id": "datepicker123", + "initial_date": "1990-04-28", + "placeholder": { + "type": "plain_text", + "text": "Select a date" + } + }, + { + "type": "overflow", + "options": [{ + "text": { + "type": "plain_text", + "text": "*this is plain_text text*" + }, + "value": "value-0" + }, + { + "text": { + "type": "plain_text", + "text": "*this is plain_text text*" + }, + "value": "value-1" + }, + { + "text": { + "type": "plain_text", + "text": "*this is plain_text text*" + }, + "value": "value-2" + }, + { + "text": { + "type": "plain_text", + "text": "*this is plain_text text*" + }, + "value": "value-3" + }, + { + "text": { + "type": "plain_text", + "text": "*this is plain_text text*" + }, + "value": "value-4" + } + ], + "action_id": "overflow" + }, + { + "type": "button", + "text": { + "type": "plain_text", + "text": "Click Me" + }, + "value": "click_me_123", + "action_id": "button" + } + ] + }] + The complete specification of Slack trigger is available [here](https://github.com/argoproj/argo-events/blob/master/api/sensor.md#slacktrigger). diff --git a/docs/service-accounts.md b/docs/service-accounts.md index 973769113e..17564454cf 100644 --- a/docs/service-accounts.md +++ b/docs/service-accounts.md @@ -5,16 +5,16 @@ A `Service Account` can be specified in the EventSource object with `spec.template.serviceAccountName`, however it is not needed for all the EventSource types except `resource`. For a `resource` EventSource, you need to -specify a Service Accout and give it `list` and `watch` permissions for the +specify a Service Account and give it `list` and `watch` permissions for the resource being watched. For example, if you want to watch actions on `Deployment` objects, you need to: -1. Create a Service Account. +1. Create a Service Account. kubectl -n your-namespace create sa my-sa -2. Grant RBAC privileges to it. +2. Grant RBAC privileges to it. kubectl -n your-namespace create role deployments-watcher --verb=list,watch --resource=deployments.apps @@ -56,12 +56,12 @@ trigger, make sure to grant `create` permission to that resource. For these triggers, you **don't** need to specify a Service Account to the Sensor. -## Service Account for Trigged Workflows (or other K8s resources) +## Service Account for Triggered Workflows (or other K8s resources) When the Sensor is used to trigger a Workflow, you might need to configure the Service Account used in the Workflow spec (**NOT** `spec.template.serviceAccountName`) following Argo Workflow -[instructions](https://github.com/argoproj/argo/blob/master/docs/service-accounts.md). +[instructions](https://github.com/argoproj/argo-workflows/blob/master/docs/service-accounts.md). 
If it is used to trigger other K8s resources (e.g. a Deployment), make sure to follow the least-privilege principle.
diff --git a/docs/tutorials/01-introduction.md b/docs/tutorials/01-introduction.md
index ac1cdfc9dd..b9c4c923c5 100644
--- a/docs/tutorials/01-introduction.md
+++ b/docs/tutorials/01-introduction.md
@@ -8,17 +8,21 @@ to any type of event-source.
 ## Prerequisites
 
 - Follow the installation guide to set up the Argo Events.
-- Make sure to configure Argo Workflow controller to listen to workflow objects
-  created in `argo-events` namespace.
+- Make sure to configure Argo Workflow controller to listen to workflow objects created in `argo-events` namespace.
+  (See [this](https://github.com/argoproj/argo-workflows/blob/master/docs/managed-namespace.md) link.)
+  The Workflow Controller will need to be installed either in a cluster-scope configuration (i.e. no "--namespaced" argument) so that it has visibility to all namespaces, or with "--managed-namespace" set to define "argo-events" as a namespace it has visibility to. To deploy Argo Workflows with a cluster-scope configuration, you can use this installation YAML file:
+
+        kubectl apply -n argo -f https://github.com/argoproj/argo-workflows/releases/latest/download/install.yaml
+
 - Make sure to read the concepts behind [eventbus](https://argoproj.github.io/argo-events/concepts/eventbus/).
   [sensor](https://argoproj.github.io/argo-events/concepts/sensor/).
   [event source](https://argoproj.github.io/argo-events/concepts/event_source/).
-- Follow the [instruction](https://github.com/argoproj/argo-events/tree/master/examples) to create a Service Account `operate-workflow-sa` with proper privileges, and make sure the Service Account used by Workflows (here we use `default` in the turorials for demonstration purpose) has proper RBAC settings.
+- Follow the [instruction](https://github.com/argoproj/argo-events/tree/master/examples) to create a Service Account `operate-workflow-sa` with proper privileges, and make sure the Service Account used by Workflows (here we use `default` in the tutorials for demonstration purposes) has proper RBAC settings.
 
 ## Get Started
 
-We are going to set up a sensor and event-source for webhook. The goal is to trigger an Argo workflow upon a HTTP Post request.
+We are going to set up a sensor and event-source for webhook. The goal is to trigger an Argo workflow upon an HTTP POST request.
 
 - Let's set up the eventbus.
@@ -49,6 +53,10 @@ If the commands are executed successfully, the eventbus, event-source and sensor
 
 - Make sure the workflow pod ran successfully.
 
+        argo logs -n argo-events @latest
+
+  Should result in something similar to what is below.
+
     _________________________________________
    / {"context":{"type":"webhook","specVersi \
    | on":"0.3","source":"webhook","e         |
diff --git a/docs/tutorials/02-parameterization.md b/docs/tutorials/02-parameterization.md
index c50d463b3f..6c85ee2367 100644
--- a/docs/tutorials/02-parameterization.md
+++ b/docs/tutorials/02-parameterization.md
@@ -51,6 +51,7 @@ type of HTTP request.

1. `Body`: This is the request payload from the HTTP request.

### Event Context
+
Now that we have an understanding of the structure of the event the webhook sensor
receives from the event-source over the eventbus, let's see how we can use the event
context to parameterize the Argo workflow.
@@ -87,6 +88,7 @@ We have successfully extracted the `type` key within the event context and param
the workflow to print the value of the `type`.
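To make the context/data split concrete, here is a small illustrative Go sketch of the event envelope. The struct is hypothetical and only mirrors the field names shown in the webhook output above; it is not one of the sensor's internal types:

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // eventEnvelope mirrors the event shape shown above: metadata under
    // "context", and the event-source payload under "data".
    type eventEnvelope struct {
    	Context struct {
    		Type        string `json:"type"`
    		SpecVersion string `json:"specversion"`
    		Source      string `json:"source"`
    		ID          string `json:"id"`
    		Subject     string `json:"subject"`
    	} `json:"context"`
    	Data json.RawMessage `json:"data"`
    }

    func main() {
    	raw := []byte(`{"context":{"type":"webhook","specversion":"0.3","source":"webhook"},"data":{"body":{"message":"hi"}}}`)
    	var ev eventEnvelope
    	if err := json.Unmarshal(raw, &ev); err != nil {
    		panic(err)
    	}
    	// A parameter with contextKey "type" resolves against this field.
    	fmt.Println(ev.Context.Type) // webhook
    }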
### Event Data
+
Now, it is time to use the event data and parameterize the Argo workflow trigger. We will
extract the `message` from the request payload and get the Argo workflow to
print the message.
@@ -120,14 +122,18 @@ print the message.
                 \    \        __/
                  \____\______/
-
Yay!! The Argo workflow printed the message. You can add any number of parameters to
update the trigger resource on the fly.

**_Note_**: If you define both the `contextKey` and `dataKey` within a parameter, then
the `dataKey` takes the precedence.

+**_Note_**: When `useRawData` is not specified or explicitly set to false, the parameter
+will resolve to a string type. When `useRawData` is set to true, a number, boolean, JSON,
+or string parameter may be resolved.
+
### Default Values
+
Each parameter comes with an option to configure the default value. This is especially
important when the `key` you defined in the parameter doesn't exist in the event.
@@ -161,7 +167,6 @@ important when the `key` you defined in the parameter doesn't exist in the event.
                 \    \        __/
                  \____\______/
-
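The fallback behavior is simple to picture. A minimal Go sketch of the idea, assuming gjson-style path lookups (illustrative only, not the sensor's code):

    package main

    import (
    	"fmt"

    	"github.com/tidwall/gjson"
    )

    func main() {
    	event := []byte(`{"body":{}}`) // "message" is absent from the event data
    	result := gjson.GetBytes(event, "body.message")
    	value := "hello default" // the parameter's configured default value
    	if result.Exists() {
    		value = result.String()
    	}
    	fmt.Println(value) // prints the default, since the key is missing
    }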
### Sprig Templates

@@ -190,7 +195,6 @@ as follows,
               dest: metadata.generateName
               operation: append
-
Consider that the event the sensor received has a format like,

        {
@@ -228,7 +232,6 @@ Send a curl request to event-source as follows,

and you will see an Argo workflow being created with a name like `webhook-foobar-xxxxx`.
-
Check the output of the workflow, it should print something like,

        ____________________________
@@ -249,6 +252,7 @@ Check the output of the workflow, it should print something like,
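To see what a sprig expression does in isolation, here is a tiny standalone Go sketch using the Masterminds/sprig function map. The template string and input are made up for illustration; the exact template used by the example sensor differs:

    package main

    import (
    	"bytes"
    	"fmt"
    	"text/template"

    	"github.com/Masterminds/sprig/v3"
    )

    func main() {
    	// A sprig-flavored template, evaluated against a single input value.
    	tmpl := template.Must(template.New("param").
    		Funcs(sprig.FuncMap()).
    		Parse(`{{ .Input | lower }}`))
    	var out bytes.Buffer
    	if err := tmpl.Execute(&out, map[string]string{"Input": "FooBar"}); err != nil {
    		panic(err)
    	}
    	fmt.Println(out.String()) // foobar
    }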
### Operations
+
Sometimes you need the ability to append or prepend a parameter value to an existing value
in the trigger resource. This is where the `operation` field within
a parameter comes in handy.
@@ -258,7 +262,6 @@ a parameter comes handy.

        kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/02-parameterization/sensor-04.yaml
-
2. Send an HTTP request to the event-source.

        curl -d '{"message":"hey!!"}' -H "Content-Type: application/json" -X POST http://localhost:12000/example
@@ -284,7 +287,6 @@ a parameter comes handy.
                 \    \        __/
                  \____\______/
-
## Trigger Template Parameterization

The parameterization you saw above deals with the trigger resource, but sometimes
@@ -299,7 +301,6 @@ The sensor you have been using in this tutorial has one parameter defined in
the trigger resource under `k8s`. We will parameterize that `parameter` by
applying a parameter at the trigger template level.
-
1. Update the `Webhook Sensor` and add parameters at trigger level.

        kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/02-parameterization/sensor-05.yaml
@@ -329,7 +330,6 @@ applying a parameter at the trigger template level.
                 \    \        __/
                  \____\______/
-
Great!! You have now learned how to apply parameters at trigger resource and template
level. Keep in mind that you can apply default values and operations like prepend and
append for trigger template parameters as well.
diff --git a/docs/tutorials/03-trigger-sources.md b/docs/tutorials/03-trigger-sources.md
index 0880d944a8..a85babcf59 100644
--- a/docs/tutorials/03-trigger-sources.md
+++ b/docs/tutorials/03-trigger-sources.md
@@ -1,7 +1,8 @@
 # Trigger Sources
+
 A trigger source is the source of the trigger resource. It can be either an external source such as `Git`,
 `S3`, `K8s Configmap`, `File`, any valid `URL` that hosts the resource or an internal resource
-which is defined in the sensor object itself like `Inline` or `Resource`.
+which is defined in the sensor object itself like `Inline` or `Resource`.
 
 In the previous sections, you have been dealing with the `Resource` trigger source. In this tutorial, we will explore other trigger sources.
@@ -10,7 +11,8 @@ In the previous sections, you have been dealing with the `Resource` trigger sour

1. The `Webhook` event-source is already set up.

## Git
+
-Git trigger source refers to K8s trigger refers to the K8s resource stored in Git.
+Git trigger source refers to the K8s trigger resource stored in Git.

The specification for the Git source is available [here](https://github.com/argoproj/argo-events/blob/master/api/sensor.md#argoproj.io/v1alpha1.GitArtifact).
@@ -33,12 +35,13 @@ The specification for the Git source is available [here](https://github.com/argo

6. Use either Curl or Postman to send a post request to the `http://localhost:12000/example`.

        curl -d '{"message":"ok"}' -H "Content-Type: application/json" -X POST http://localhost:12000/example
-
+
7. Now, you should see an Argo workflow being created.

        kubectl -n argo-events get wf

## S3
+
You can refer to the K8s resource stored on an S3-compliant store as the trigger source. For this tutorial,
let's set up a Minio server, which is an S3-compliant store.
@@ -59,29 +62,31 @@ For this tutorial, lets set up a minio server which is S3 compliant store.

6. Use either Curl or Postman to send a post request to the `http://localhost:12000/example`.
        curl -d '{"message":"ok"}' -H "Content-Type: application/json" -X POST http://localhost:12000/example
-
+
7. Now, you should see an Argo workflow being created.

        kubectl -n argo-events get wf

## K8s Configmap
+
A K8s configmap can be treated as a trigger source if needed.

1. Let's create a configmap called `trigger-store`.

        kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/03-trigger-sources/trigger-store.yaml
-
+
2. Create a sensor with a configmap as the trigger source and refer it to the `trigger-store`.

        kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/03-trigger-sources/sensor-cm.yaml
-
+
3. Use either Curl or Postman to send a post request to the `http://localhost:12000/example`.

        curl -d '{"message":"ok"}' -H "Content-Type: application/json" -X POST http://localhost:12000/example
-
+
4. Now, you should see an Argo workflow being created.
-
+
        kubectl -n argo-events get wf

## File & URL
+
File and URL trigger sources are pretty self-explanatory. The example sensors are available under the
[examples/sensors](https://github.com/argoproj/argo-events/tree/master/examples/sensors) folder.
diff --git a/docs/tutorials/04-standard-k8s-resources.md b/docs/tutorials/04-standard-k8s-resources.md
index 1c4524ba9f..470f46f4db 100644
--- a/docs/tutorials/04-standard-k8s-resources.md
+++ b/docs/tutorials/04-standard-k8s-resources.md
@@ -10,7 +10,7 @@ provides an avenue to set up pipelines for existing workloads.

## Prerequisites

-1. Make sure that the service account used by the Sensor has necessary
+1. Make sure that the service account used by the Sensor has the necessary
   permissions to create the Kubernetes resource of your choice. We use
   `k8s-resource-sa` for the examples below; it should be bound to a Role like the following.
@@ -33,29 +33,24 @@ provides an avenue to set up pipelines for existing workloads.

              verbs:
                - create

-2. The `Webhook` event-source is already set up.
+2. The `Webhook` event-source is already set up.

## Pod

-1. Create a sensor with K8s trigger. Pay close attention to the `group`,
-   `version` and `kind` keys within the trigger resource. These keys determine
-   the type of kubernetes object.
-
-   You will notice that the `group` key is empty, that means we want to use
-   `core` group. For any other groups, you need to specify the `group` key.
+1. Create a sensor with K8s trigger.

        kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/04-standard-k8s-resources/sensor-pod.yaml

-2. Use either Curl or Postman to send a post request to the
+2. Use either Curl or Postman to send a post request to the
   `http://localhost:12000/example`.

        curl -d '{"message":"ok"}' -H "Content-Type: application/json" -X POST http://localhost:12000/example

-3. Now, you should see a pod being created.
+3. Now, you should see a pod being created.

        kubectl -n argo-events get po

-Output
+4. After the pod has completed, inspect its logs; you will see something similar to the below.

     _________________________________________
    / {"context":{"type":"webhook","specVersi \
@@ -85,20 +80,20 @@ Output

## Deployment

-1. Lets create a sensor with a K8s deployment as trigger.
+1. Let's create a sensor with a K8s deployment as the trigger.

        kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/04-standard-k8s-resources/sensor-deployment.yaml

-2. Use either Curl or Postman to send a post request to the
+2.
Use either Curl or Postman to send a post request to the
   `http://localhost:12000/example`.

        curl -d '{"message":"ok"}' -H "Content-Type: application/json" -X POST http://localhost:12000/example

-3. Now, you should see a deployment being created. Get the corresponding pod.
+3. Now, you should see a deployment being created. Get the corresponding pod.

        kubectl -n argo-events get deployments

-Output
+4. After the pod has completed, inspect its logs; you will see something similar to the below.

     _________________________________________
    / {"context":{"type":"webhook","specVersi \
diff --git a/docs/tutorials/06-trigger-conditions.md b/docs/tutorials/06-trigger-conditions.md
index db9637df16..d82d304e60 100644
--- a/docs/tutorials/06-trigger-conditions.md
+++ b/docs/tutorials/06-trigger-conditions.md
@@ -22,30 +22,30 @@ want to trigger an Argo workflow if the sensor receives an event from the
`Webhook` event-source, but, another workflow if it receives an event from the
`Minio` event-source.

-1. Create the webhook event-source and event-source. The event-source listens
+1. Create the webhook event-source. The event-source listens
   to HTTP requests on port `12000`.

        kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/06-trigger-conditions/webhook-event-source.yaml

-2. Create the minio event-source. The event-source listens to events of type
+2. Create the Minio event-source. The event-source listens to events of type
   `PUT` and `DELETE` for objects in bucket `test`.

        kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/06-trigger-conditions/minio-event-source.yaml

Make sure there are no errors in any of the event-sources.

-3. Let's create the sensor. If you take a closer look at the trigger templates,
+3. Let's create the sensor. If you take a closer look at the trigger templates,
   you will notice that each contains a field named `conditions`, which is a boolean
   expression containing dependency names. So, as soon as the expression is resolved
   as true, the corresponding trigger will be executed.

        kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/06-trigger-conditions/sensor-01.yaml

-4. Send a HTTP request to Webhook event-source.
+4. Send an HTTP request to the Webhook event-source.

        curl -d '{"message":"this is my first webhook"}' -H "Content-Type: application/json" -X POST http://localhost:12000/example

-5. You will notice an Argo worklfow with name `group-1-xxxx` is created with
+5. You will notice an Argo workflow with the name `group-1-xxxx` is created with the
   following output,

    __________________________
@@ -63,11 +63,10 @@ Make sure there are no errors in any of the event-sources.
                 \    \        __/
                  \____\______/
-
-6. Now, lets generate a Minio event so that we can run `group-2-xxxx` workflow.
+6. Now, let's generate a Minio event so that we can run the `group-2-xxxx` workflow.
   Drop a file onto `test` bucket. The workflow that will get created will print the
   name of the bucket as follows,
-
    ______
   < test >
    ------
@@ -83,7 +82,7 @@ Make sure there are no errors in any of the event-sources.
                 \    \        __/
                  \____\______/
-
-5. Great!! You have now learned how to use `conditions`. Lets update the sensor
+5. Great!! You have now learned how to use `conditions`. Let's update the sensor
   with a trigger that waits for both dependencies to resolve. This is the normal
   sensor behavior if `conditions` is not defined.
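Under the hood, the deleted NATS driver later in this diff evaluates exactly this kind of expression with the govaluate library (see the `github.com/Knetic/govaluate` import in the removed `nats.go`). A standalone sketch of the semantics, with made-up dependency names:

    package main

    import (
    	"fmt"

    	"github.com/Knetic/govaluate"
    )

    func main() {
    	// A conditions expression over dependency names, like the sensor's.
    	expr, err := govaluate.NewEvaluableExpression("(dep1 || dep2) && dep3")
    	if err != nil {
    		panic(err)
    	}
    	// Which dependencies have delivered an event so far.
    	seen := map[string]interface{}{"dep1": true, "dep2": false, "dep3": true}
    	ok, err := expr.Evaluate(seen)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(ok) // true: dep1 and dep3 satisfy the expression
    }

As soon as such an evaluation returns true, the corresponding trigger fires.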
@@ -92,7 +91,6 @@ Make sure there are no errors in any of the event-sources. Send a HTTP request and perform a file drop on Minio bucket as done above. You should get the following output. - _______________________________ < this is my first webhook test > ------------------------------- diff --git a/docs/tutorials/07-filters.md b/docs/tutorials/07-filters.md deleted file mode 100644 index f7391896ee..0000000000 --- a/docs/tutorials/07-filters.md +++ /dev/null @@ -1,183 +0,0 @@ -# Filters - -In the previous sections, you have seen how to trigger an Argo workflow based on events. In this tutorial, -you will learn how to apply filters on event data and context. Filters provide a powerful mechanism to -apply constraints on the events in order to determine a validity. - -Argo Events offers 3 types of filters: - -1. Data Filter -2. Context Filter -3. Time Filter - -## Prerequisite - -Webhook event-source must be set up. - -## Data Filter -Data filters as the name suggests are applied on the event data. A CloudEvent from Webhook event-source has -payload structure as, - - - { - "context": { - "type": "type_of_event_source", - "specversion": "cloud_events_version", - "source": "name_of_the_event_source", - "id": "unique_event_id", - "time": "event_time", - "datacontenttype": "type_of_data", - "subject": "name_of_the_configuration_within_event_source" - }, - "data": { - "header": {}, - "body": {}, - } - } - -Data Filters are applied on `data` within the payload. We will make a simple HTTP request -to webhook event-source with request data as `{"message":"this is my first webhook"}` and apply -data filter on `message`. - -A data filter has following fields, - - - data: - - path: path_within_event_data - type: types_of_the_data - value: - - list_of_possible_values - -### Comparator - -The data filter offers `comparator` “>=”, “>”, “=”, “!=”, “<”, or “<=”. - -e.g., - - filters: - data: - - path: body.value - type: number - comparator: ">" - value: - - "50.0" - -
- -**Note**: If data type is a `string`, then you can pass either an exact value or a regex. -If data types is bool or float, then you need to pass the exact value. - -1. Lets create a webhook sensor with data filter. - - kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/07-filters/sensor-data-filters.yaml - -2. Send a HTTP request to event-source. - - curl -d '{"message":"this is my first webhook"}' -H "Content-Type: application/json" -X POST http://localhost:12000/example - -3. You will notice that the sensor logs prints the event is invalid as the sensor expects for - either `hello` or `hey` as the value of `body.message`. - -4. Send a valid HTTP request to event-source. - - curl -d '{"message":"hello"}' -H "Content-Type: application/json" -X POST http://localhost:12000/example - -5. Watch for a workflow with name `data-workflow-xxxx`. - -### Multiple Paths - -If the HTTP request was less simple and contained multiple paths that we would like to filter against, -we can make use of [multipaths](https://github.com/tidwall/gjson/blob/master/SYNTAX.md#multipaths) to combine -multiple data paths in the payload into one string. - -For a given payload such as: - - { - "body": { - "action":"opened", - "labels": [ - {"id":"1234", "name":"Webhook"}, - {"id":"5678", "name":"Approved"} - ] - } - } - -We want our sensor to fire if the action is "opened" and it has a label of "Webhook" or if the action is "closed" -and it has a label of "Webhook" and "Approved". We could therefore define the path as: - - filters: - data: - - path: "[body.action,body.labels.#(name=="Webhook").name,body.labels.#(name=="Approved").name]" - type: string - ... - -This would return a string like: `["opened","Webhook","Approved"]`. As the resulting data type will be a -`string`, we can pass a regex over it: - - filters: - data: - - path: "[body.action,body.labels.#(name=="Webhook").name,body.labels.#(name=="Approved").name]" - type: string - value: - - "(\bopened\b.*\bWebhook\b)|(\blabeled\b.*(\bWebhook\b.*\bApproved\b))" -### Template - -The data filter offers `template`. -`template` process the incoming data defined in `path` through [sprig template](https://github.com/Masterminds/sprig) before matching with the `value`. - -e.g. - - filters: - data: - - path: body.value - type: string - value: - - "hello world" - template: "{{ b64dec .Input }}" - -message `'{"message":"aGVsbG8gd29ybGQ="}'` will match with the above filter definition. - -**Note**: Data type is assumed to be string before applying the `template`, then cast to the user defined `type` for value matching. - -## Context Filter -Similar to the data filter, you can apply a filter on the context of the event. - -Change the subscriber in the webhook event-source to point it to `context-filter` sensor's URL. - -1. Lets create a webhook sensor with context filter. - - kubectl -n argo-events apply -f https://raw.githubusercontent.com/argoproj/argo-events/stable/examples/tutorials/07-filters/sensor-context-filter.yaml - -2. Send a HTTP request to event-source. - - curl -d '{"message":"this is my first webhook"}' -H "Content-Type: application/json" -X POST http://localhost:12000/example - -3. You will notice that the sensor logs prints the event is invalid as the sensor expects for - either `custom-webhook` as the value of the `source`. - -## Time Filter - -You can also use time filter, which is applied on event time. 
-It filters out events that occur outside the specified time range, so it is specially helpful when
-you need to make sure an event occurs between a certain time-frame.
-
-Time filter takes a `start` and `stop` time in `HH:MM:SS` format in UTC. If `stop` is smaller than `start`,
-the stop time is treated as next day of `start`. Note that `start` is inclusive while `stop` is exclusive.
-The diagrams below illustlate these behavior.
-
-An example of time filter is available under `examples/sensors`.
-
-1. if `start` < `stop`: event time must be in `[start, stop)`.
-
-      00:00:00                         00:00:00                         00:00:00
-      ┃     start                stop  ┃     start                stop  ┃
-    ─┸─────●───────────────────────○─────┸─────●───────────────────────○─────┸─
-           ╰───────── OK ──────────╯           ╰───────── OK ──────────╯
-
-2. if `stop` < `start`: event time must be in `[start, stop@Next day)`
-   (this is equivalent to: event time must be in `[00:00:00, stop) || [start, 00:00:00@Next day)`).
-
-      00:00:00                         00:00:00                         00:00:00
-      ┃          stop        start     ┃          stop        start     ┃
-    ─┸───────────○───────────●───────────┸───────────○───────────●───────────┸─
-    ─── OK ──────╯           ╰───────── OK ──────────╯           ╰────── OK ───
diff --git a/docs/tutorials/08-policy.md b/docs/tutorials/07-policy.md
similarity index 92%
rename from docs/tutorials/08-policy.md
rename to docs/tutorials/07-policy.md
index e02ef5e8e0..c92fd85b62 100644
--- a/docs/tutorials/08-policy.md
+++ b/docs/tutorials/07-policy.md
@@ -10,7 +10,7 @@ Currently, Argo Events supports 2 types of policies:

## Resource Labels Policy

This type of policy determines whether the trigger completed successfully based on the labels
-set on the trigger resource.
+set on the trigger resource.

Consider a sensor which has an Argo workflow as the trigger. When an Argo workflow completes
successfully, the workflow controller sets a label on the resource as `workflows.argoproj.io/completed: 'true'`.
@@ -18,13 +18,13 @@ So, in order for sensor to determine whether the trigger workflow completed succ
you just need to set the policy labels as `workflows.argoproj.io/completed: 'true'` under trigger template.

In addition to labels, you can also define a `backoff` and an option to error out if the sensor
-is unable to determine status of the trigger after the backoff completes. Check out the specification of
+is unable to determine the status of the trigger after the backoff completes. Check out the specification of
resource labels policy [here](https://github.com/argoproj/argo-events/blob/master/api/sensor.md#k8sresourcepolicy).

## Status Policy

For triggers like HTTP request or AWS Lambda, you can apply the `Status Policy` to determine the trigger status.
-The Status Policy supports list of expected response statuses. If the status of the HTTP request or Lamda is within
+The Status Policy supports a list of expected response statuses. If the status of the HTTP request or Lambda is within
the statuses defined in the policy, then the trigger is considered successful.

Complete specification is available [here](https://github.com/argoproj/argo-events/blob/master/api/sensor.md#statuspolicy).
diff --git a/eventbus/common/error.go b/eventbus/common/error.go
new file mode 100644
index 0000000000..ddbc27d5c6
--- /dev/null
+++ b/eventbus/common/error.go
@@ -0,0 +1,15 @@
+package common
+
+// EventBusError is a particular EventBus related error.
+type EventBusError struct {
+	err error
+}
+
+func (e *EventBusError) Error() string {
+	return e.err.Error()
+}
+
+// NewEventBusError returns an EventBusError.
+func NewEventBusError(err error) error {
+	return &EventBusError{err: err}
+}
diff --git a/eventbus/common/error_test.go b/eventbus/common/error_test.go
new file mode 100644
index 0000000000..5c98e8c1ac
--- /dev/null
+++ b/eventbus/common/error_test.go
@@ -0,0 +1,25 @@
+package common
+
+import (
+	"errors"
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func Test_error(t *testing.T) {
+	err := fmt.Errorf("error")
+	var ebErr *EventBusError
+	assert.False(t, errors.As(err, &ebErr))
+	err = fmt.Errorf("err1, %w", err)
+	assert.False(t, errors.As(err, &ebErr))
+	err = NewEventBusError(err)
+	assert.True(t, errors.As(err, &ebErr))
+	err = fmt.Errorf("err3, %w", err)
+	assert.True(t, errors.As(err, &ebErr))
+	err = fmt.Errorf("err4, %w", err)
+	assert.True(t, errors.As(err, &ebErr))
+	err = fmt.Errorf("err5, %w", err)
+	assert.True(t, errors.As(err, &ebErr))
+}
diff --git a/eventbus/common/interface.go b/eventbus/common/interface.go
new file mode 100644
index 0000000000..524dc05e8e
--- /dev/null
+++ b/eventbus/common/interface.go
@@ -0,0 +1,50 @@
+package common
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	cloudevents "github.com/cloudevents/sdk-go/v2"
+)
+
+type Connection interface {
+	Close() error
+
+	IsClosed() bool
+}
+
+type EventSourceConnection interface {
+	Connection
+
+	Publish(ctx context.Context, msg Message) error
+}
+
+type TriggerConnection interface {
+	Connection
+
+	fmt.Stringer // need to implement String()
+
+	Subscribe(ctx context.Context,
+		closeCh <-chan struct{},
+		resetConditionsCh <-chan struct{},
+		lastResetTime time.Time,
+		transform func(depName string, event cloudevents.Event) (*cloudevents.Event, error),
+		filter func(string, cloudevents.Event) bool,
+		action func(map[string]cloudevents.Event),
+		defaultSubject *string) error
+}
+
+type EventSourceDriver interface {
+	Initialize() error
+	Connect(clientID string) (EventSourceConnection, error)
+}
+
+type SensorDriver interface {
+	Initialize() error
+	Connect(ctx context.Context,
+		triggerName string,
+		dependencyExpression string,
+		deps []Dependency,
+		atLeastOnce bool) (TriggerConnection, error)
+}
diff --git a/eventbus/common/structs.go b/eventbus/common/structs.go
new file mode 100644
index 0000000000..c19dcd4707
--- /dev/null
+++ b/eventbus/common/structs.go
@@ -0,0 +1,36 @@
+package common
+
+import (
+	eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1"
+)
+
+// Auth contains the auth info for the event bus
+type Auth struct {
+	Strategy   eventbusv1alpha1.AuthStrategy
+	Credential *AuthCredential
+}
+
+// AuthCredential hosts the credential info
+type AuthCredential struct {
+	Token    string
+	Username string
+	Password string
+}
+
+type MsgHeader struct {
+	EventSourceName string
+	EventName       string
+	ID              string
+}
+
+type Message struct {
+	MsgHeader
+	Body []byte
+}
+
+// Dependency is a struct for dependency info of a sensor
+type Dependency struct {
+	Name            string
+	EventSourceName string
+	EventName       string
+}
diff --git a/eventbus/driver.go b/eventbus/driver.go
new file mode 100644
index 0000000000..06fc095788
--- /dev/null
+++ b/eventbus/driver.go
@@ -0,0 +1,161 @@
+package eventbus
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/argoproj/argo-events/common"
+	apicommon "github.com/argoproj/argo-events/pkg/apis/common"
+	"github.com/fsnotify/fsnotify"
+	"go.uber.org/zap"
+
+	"github.com/argoproj/argo-events/common/logging"
+	eventbuscommon "github.com/argoproj/argo-events/eventbus/common"
+	jetstreamsource
"github.com/argoproj/argo-events/eventbus/jetstream/eventsource" + jetstreamsensor "github.com/argoproj/argo-events/eventbus/jetstream/sensor" + kafkasource "github.com/argoproj/argo-events/eventbus/kafka/eventsource" + kafkasensor "github.com/argoproj/argo-events/eventbus/kafka/sensor" + stansource "github.com/argoproj/argo-events/eventbus/stan/eventsource" + stansensor "github.com/argoproj/argo-events/eventbus/stan/sensor" + eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" +) + +func GetEventSourceDriver(ctx context.Context, eventBusConfig eventbusv1alpha1.BusConfig, eventSourceName string, defaultSubject string) (eventbuscommon.EventSourceDriver, error) { + auth, err := GetAuth(ctx, eventBusConfig) + if err != nil { + return nil, err + } + if eventSourceName == "" { + return nil, fmt.Errorf("eventSourceName must be specified to create eventbus driver") + } + + logger := logging.FromContext(ctx) + + logger.Infof("eventBusConfig: %+v", eventBusConfig) + + var eventBusType apicommon.EventBusType + switch { + case eventBusConfig.NATS != nil: + eventBusType = apicommon.EventBusNATS + case eventBusConfig.JetStream != nil: + eventBusType = apicommon.EventBusJetStream + case eventBusConfig.Kafka != nil: + eventBusType = apicommon.EventBusKafka + default: + return nil, fmt.Errorf("invalid event bus") + } + + var dvr eventbuscommon.EventSourceDriver + switch eventBusType { + case apicommon.EventBusNATS: + if defaultSubject == "" { + return nil, fmt.Errorf("subject must be specified to create NATS Streaming driver") + } + dvr = stansource.NewSourceSTAN(eventBusConfig.NATS.URL, *eventBusConfig.NATS.ClusterID, eventSourceName, defaultSubject, auth, logger) + case apicommon.EventBusJetStream: + dvr, err = jetstreamsource.NewSourceJetstream(eventBusConfig.JetStream.URL, eventSourceName, eventBusConfig.JetStream.StreamConfig, auth, logger) // don't need to pass in subject because subjects will be derived from dependencies + if err != nil { + return nil, err + } + case apicommon.EventBusKafka: + dvr = kafkasource.NewKafkaSource(eventBusConfig.Kafka, logger) + default: + return nil, fmt.Errorf("invalid eventbus type") + } + return dvr, nil +} + +func GetSensorDriver(ctx context.Context, eventBusConfig eventbusv1alpha1.BusConfig, sensorSpec *v1alpha1.Sensor, hostname string) (eventbuscommon.SensorDriver, error) { + auth, err := GetAuth(ctx, eventBusConfig) + if err != nil { + return nil, err + } + + if sensorSpec == nil { + return nil, fmt.Errorf("sensorSpec required for getting eventbus driver") + } + if sensorSpec.Name == "" { + return nil, fmt.Errorf("sensorSpec name must be set for getting eventbus driver") + } + logger := logging.FromContext(ctx) + + var eventBusType apicommon.EventBusType + switch { + case eventBusConfig.NATS != nil: + eventBusType = apicommon.EventBusNATS + case eventBusConfig.JetStream != nil: + eventBusType = apicommon.EventBusJetStream + case eventBusConfig.Kafka != nil: + eventBusType = apicommon.EventBusKafka + default: + return nil, fmt.Errorf("invalid event bus") + } + + var dvr eventbuscommon.SensorDriver + switch eventBusType { + case apicommon.EventBusNATS: + dvr = stansensor.NewSensorSTAN(eventBusConfig.NATS.URL, *eventBusConfig.NATS.ClusterID, sensorSpec.Name, auth, logger) + return dvr, nil + case apicommon.EventBusJetStream: + dvr, err = jetstreamsensor.NewSensorJetstream(eventBusConfig.JetStream.URL, sensorSpec, eventBusConfig.JetStream.StreamConfig, auth, logger) // don't need 
to pass in subject because subjects will be derived from dependencies + return dvr, err + case apicommon.EventBusKafka: + dvr = kafkasensor.NewKafkaSensor(eventBusConfig.Kafka, sensorSpec, hostname, logger) + return dvr, nil + default: + return nil, fmt.Errorf("invalid eventbus type") + } +} + +func GetAuth(ctx context.Context, eventBusConfig eventbusv1alpha1.BusConfig) (*eventbuscommon.Auth, error) { + logger := logging.FromContext(ctx) + + var eventBusAuth *eventbusv1alpha1.AuthStrategy + switch { + case eventBusConfig.NATS != nil: + eventBusAuth = eventBusConfig.NATS.Auth + case eventBusConfig.JetStream != nil: + if eventBusConfig.JetStream.AccessSecret != nil { + eventBusAuth = &eventbusv1alpha1.AuthStrategyBasic + } else { + eventBusAuth = nil + } + case eventBusConfig.Kafka != nil: + eventBusAuth = nil + default: + return nil, fmt.Errorf("invalid event bus") + } + var auth *eventbuscommon.Auth + cred := &eventbuscommon.AuthCredential{} + if eventBusAuth == nil || *eventBusAuth == eventbusv1alpha1.AuthStrategyNone { + auth = &eventbuscommon.Auth{ + Strategy: eventbusv1alpha1.AuthStrategyNone, + } + } else { + v := common.ViperWithLogging() + v.SetConfigName("auth") + v.SetConfigType("yaml") + v.AddConfigPath(common.EventBusAuthFileMountPath) + err := v.ReadInConfig() + if err != nil { + return nil, fmt.Errorf("failed to load auth.yaml. err: %w", err) + } + err = v.Unmarshal(cred) + if err != nil { + logger.Errorw("failed to unmarshal auth.yaml", zap.Error(err)) + return nil, err + } + v.WatchConfig() + v.OnConfigChange(func(e fsnotify.Event) { + // Auth file changed, let it restart + logger.Fatal("Eventbus auth config file changed, exiting..") + }) + auth = &eventbuscommon.Auth{ + Strategy: *eventBusAuth, + Credential: cred, + } + } + + return auth, nil +} diff --git a/eventbus/driver/driver.go b/eventbus/driver/driver.go deleted file mode 100644 index d7553b0c6c..0000000000 --- a/eventbus/driver/driver.go +++ /dev/null @@ -1,56 +0,0 @@ -package driver - -import ( - "context" - - eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" - cloudevents "github.com/cloudevents/sdk-go/v2" -) - -// Driver is an interface for event bus -type Driver interface { - Connect() (Connection, error) - - // SubscribeEventSources is used to subscribe multiple event source dependencies - // Parameter - ctx, context - // Parameter - conn, eventbus connection - // Parameter - group, NATS Streaming queue group or Kafka consumer group - // Parameter - closeCh, channel to indicate to close the subscription - // Parameter - dependencyExpr, example: "(dep1 || dep2) && dep3" - // Parameter - dependencies, array of dependencies information - // Parameter - filter, a function used to filter the message - // Parameter - action, a function to be triggered after all conditions meet - SubscribeEventSources(ctx context.Context, conn Connection, group string, closeCh <-chan struct{}, dependencyExpr string, dependencies []Dependency, filter func(string, cloudevents.Event) bool, action func(map[string]cloudevents.Event)) error - - // Publish a message - Publish(conn Connection, message []byte) error -} - -// Connection is an interface of event bus driver -type Connection interface { - Close() error - - IsClosed() bool - - Publish(subject string, data []byte) error -} - -// Auth contains the auth infor for event bus -type Auth struct { - Strategy eventbusv1alpha1.AuthStrategy - Crendential *AuthCredential -} - -// AuthCredential host the credential info -type AuthCredential struct { - Token string - 
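For orientation, here is a rough sketch of how an event source is expected to drive this API, based only on the signatures above. Error handling is trimmed to panics, the URL, client ID, and event names are placeholders, and the NATS config shape is inferred from how `driver.go` reads it; treat this as an illustration, not the actual event-source wiring:

    package main

    import (
    	"context"

    	"github.com/argoproj/argo-events/eventbus"
    	eventbuscommon "github.com/argoproj/argo-events/eventbus/common"
    	eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1"
    )

    func main() {
    	ctx := context.Background()
    	clusterID := "argo-events"
    	cfg := eventbusv1alpha1.BusConfig{
    		NATS: &eventbusv1alpha1.NATSConfig{
    			URL:       "nats://eventbus-default-stan-svc:4222", // placeholder
    			ClusterID: &clusterID,
    		},
    	}
    	// Pick the driver for the configured bus (STAN here), then connect.
    	driver, err := eventbus.GetEventSourceDriver(ctx, cfg, "webhook", "default-subject")
    	if err != nil {
    		panic(err)
    	}
    	if err := driver.Initialize(); err != nil {
    		panic(err)
    	}
    	conn, err := driver.Connect("webhook-client-0")
    	if err != nil {
    		panic(err)
    	}
    	defer conn.Close()

    	// Publish one message; the header identifies the source and event.
    	msg := eventbuscommon.Message{
    		MsgHeader: eventbuscommon.MsgHeader{
    			EventSourceName: "webhook",
    			EventName:       "example",
    			ID:              "evt-1",
    		},
    		Body: []byte(`{"hello":"world"}`),
    	}
    	if err := conn.Publish(ctx, msg); err != nil {
    		panic(err)
    	}
    }

Failures surfaced by the bus can then be distinguished from other errors with `errors.As(err, &ebErr)` against `*EventBusError`, as the test file above demonstrates.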
Username string - Password string -} - -// Dependency is a struct for dependency info of a sensor -type Dependency struct { - Name string - EventSourceName string - EventName string -} diff --git a/eventbus/driver/nats.go b/eventbus/driver/nats.go deleted file mode 100644 index ce42363c48..0000000000 --- a/eventbus/driver/nats.go +++ /dev/null @@ -1,449 +0,0 @@ -package driver - -import ( - "context" - "encoding/json" - "strings" - "sync" - "time" - - "github.com/Knetic/govaluate" - cloudevents "github.com/cloudevents/sdk-go/v2" - "github.com/gobwas/glob" - nats "github.com/nats-io/nats.go" - "github.com/nats-io/stan.go" - "github.com/nats-io/stan.go/pb" - "github.com/pkg/errors" - "go.uber.org/zap" - - eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" -) - -type natsStreamingConnection struct { - natsConn *nats.Conn - stanConn stan.Conn - - natsConnected bool - stanConnected bool -} - -func (nsc *natsStreamingConnection) Close() error { - if nsc.stanConn != nil { - err := nsc.stanConn.Close() - if err != nil { - return err - } - } - if nsc.natsConn != nil && nsc.natsConn.IsConnected() { - nsc.natsConn.Close() - } - return nil -} - -func (nsc *natsStreamingConnection) IsClosed() bool { - if nsc.natsConn == nil || nsc.stanConn == nil || !nsc.natsConnected || !nsc.stanConnected || nsc.natsConn.IsClosed() { - return true - } - return false -} - -func (nsc *natsStreamingConnection) Publish(subject string, data []byte) error { - return nsc.stanConn.Publish(subject, data) -} - -type natsStreaming struct { - url string - auth *Auth - clusterID string - subject string - clientID string - - logger *zap.SugaredLogger -} - -// NewNATSStreaming returns a nats streaming driver -func NewNATSStreaming(url, clusterID, subject, clientID string, auth *Auth, logger *zap.SugaredLogger) Driver { - return &natsStreaming{ - url: url, - clusterID: clusterID, - subject: subject, - clientID: clientID, - auth: auth, - logger: logger, - } -} - -func (n *natsStreaming) Connect() (Connection, error) { - log := n.logger.With("clientID", n.clientID) - conn := &natsStreamingConnection{} - opts := []nats.Option{ - // Do not reconnect here but handle reconnction outside - nats.NoReconnect(), - nats.DisconnectErrHandler(func(nc *nats.Conn, err error) { - conn.natsConnected = false - log.Errorw("NATS connection lost", zap.Error(err)) - }), - nats.ReconnectHandler(func(nnc *nats.Conn) { - conn.natsConnected = true - log.Info("Reconnected to NATS server") - }), - } - switch n.auth.Strategy { - case eventbusv1alpha1.AuthStrategyToken: - log.Info("NATS auth strategy: Token") - opts = append(opts, nats.Token(n.auth.Crendential.Token)) - case eventbusv1alpha1.AuthStrategyNone: - log.Info("NATS auth strategy: None") - default: - return nil, errors.New("unsupported auth strategy") - } - nc, err := nats.Connect(n.url, opts...) 
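For context, callers drove the legacy Driver interface being deleted here roughly as follows. This is a minimal sketch against the signatures above; the URL, cluster/client IDs, group name, and dependency are illustrative assumptions, and imports for context, cloudevents, zap, and this driver package are assumed to be in scope:

func subscribeSketch(ctx context.Context, logger *zap.SugaredLogger, auth *driver.Auth, closeCh <-chan struct{}) error {
	// All literal values here are hypothetical.
	drv := driver.NewNATSStreaming("nats://localhost:4222", "my-cluster", "my-subject", "my-client", auth, logger)
	conn, err := drv.Connect()
	if err != nil {
		return err
	}
	defer conn.Close()
	deps := []driver.Dependency{{Name: "dep1", EventSourceName: "webhook", EventName: "example"}}
	// Blocks until ctx is cancelled or closeCh fires; action runs once "dep1" is satisfied.
	return drv.SubscribeEventSources(ctx, conn, "sensor-group", closeCh, "dep1", deps,
		func(depName string, ev cloudevents.Event) bool { return true }, // filter: accept everything
		func(events map[string]cloudevents.Event) { logger.Infof("triggered with %d events", len(events)) })
}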
- if err != nil { - log.Errorw("Failed to connect to NATS server", zap.Error(err)) - return nil, err - } - log.Info("Connected to NATS server.") - conn.natsConn = nc - conn.natsConnected = true - - sc, err := stan.Connect(n.clusterID, n.clientID, stan.NatsConn(nc), stan.Pings(5, 60), - stan.SetConnectionLostHandler(func(_ stan.Conn, reason error) { - conn.stanConnected = false - log.Errorw("NATS streaming connection lost", zap.Error(err)) - })) - if err != nil { - log.Errorw("Failed to connect to NATS streaming server", zap.Error(err)) - return nil, err - } - log.Info("Connected to NATS streaming server.") - conn.stanConn = sc - conn.stanConnected = true - return conn, nil -} - -func (n *natsStreaming) Publish(conn Connection, message []byte) error { - return conn.Publish(n.subject, message) -} - -// SubscribeEventSources is used to subscribe multiple event source dependencies -// Parameter - ctx, context -// Parameter - conn, eventbus connection -// Parameter - group, queue group name -// Parameter - closeCh, channel to indicate to close the subscription -// Parameter - dependencyExpr, example: "(dep1 || dep2) && dep3" -// Parameter - dependencies, array of dependencies information -// Parameter - filter, a function used to filter the message -// Parameter - action, a function to be triggered after all conditions meet -func (n *natsStreaming) SubscribeEventSources(ctx context.Context, conn Connection, group string, closeCh <-chan struct{}, dependencyExpr string, dependencies []Dependency, filter func(string, cloudevents.Event) bool, action func(map[string]cloudevents.Event)) error { - log := n.logger.With("clientID", n.clientID) - msgHolder, err := newEventSourceMessageHolder(dependencyExpr, dependencies) - if err != nil { - return err - } - nsc, ok := conn.(*natsStreamingConnection) - if !ok { - return errors.New("not a NATS streaming connection") - } - // use group name as durable name - durableName := group - sub, err := nsc.stanConn.QueueSubscribe(n.subject, group, func(m *stan.Msg) { - n.processEventSourceMsg(m, msgHolder, filter, action, log) - }, stan.DurableName(durableName), - stan.SetManualAckMode(), - stan.StartAt(pb.StartPosition_NewOnly), - stan.AckWait(1*time.Second), - stan.MaxInflight(len(msgHolder.depNames)+2)) - if err != nil { - log.Errorf("failed to subscribe to subject %s", n.subject) - return err - } - log.Infof("Subscribed to subject %s ...", n.subject) - - // Daemon to evict cache - wg := &sync.WaitGroup{} - cacheEvictorStopCh := make(chan struct{}) - wg.Add(1) - go func() { - defer wg.Done() - log.Info("starting ExactOnce cache clean up daemon ...") - ticker := time.NewTicker(60 * time.Second) - defer ticker.Stop() - for { - select { - case <-cacheEvictorStopCh: - log.Info("exiting ExactOnce cache clean up daemon...") - return - case <-ticker.C: - now := time.Now().UnixNano() - num := 0 - msgHolder.smap.Range(func(key, value interface{}) bool { - v := value.(int64) - // Evict cached ID older than 5 minutes - if now-v > 5*60*1000*1000*1000 { - msgHolder.smap.Delete(key) - num++ - log.Debugw("cached ID evicted", "id", key) - } - return true - }) - log.Infof("finished evicting %v cached IDs, time cost: %v ms", num, (time.Now().UnixNano()-now)/1000/1000) - } - } - }() - - for { - select { - case <-ctx.Done(): - log.Info("existing, unsubscribing and closing connection...") - _ = sub.Close() - log.Infof("subscription on subject %s closed", n.subject) - cacheEvictorStopCh <- struct{}{} - wg.Wait() - return nil - case <-closeCh: - log.Info("closing subscription...") - _ = 
sub.Close() - log.Infof("subscription on subject %s closed", n.subject) - cacheEvictorStopCh <- struct{}{} - wg.Wait() - return nil - } - } -} - -func (n *natsStreaming) processEventSourceMsg(m *stan.Msg, msgHolder *eventSourceMessageHolder, filter func(dependencyName string, event cloudevents.Event) bool, action func(map[string]cloudevents.Event), log *zap.SugaredLogger) { - var event *cloudevents.Event - if err := json.Unmarshal(m.Data, &event); err != nil { - log.Errorf("Failed to convert to a cloudevent, discarding it... err: %v", err) - _ = m.Ack() - return - } - - depName, err := msgHolder.getDependencyName(event.Source(), event.Subject()) - if err != nil { - log.Errorf("Failed to get the dependency name, discarding it... err: %v", err) - _ = m.Ack() - return - } - - if depName == "" || !filter(depName, *event) { - // message not interested - _ = m.Ack() - return - } - - if msgHolder.lastMeetTime > 0 || msgHolder.latestGoodMsgTimestamp > 0 { - // Old redelivered messages should be able to be acked in 60 seconds. - // Reset if the flag didn't get cleared in that period for some reasons. - if time.Now().Unix()-msgHolder.lastMeetTime > 60 { - msgHolder.resetAll() - log.Info("ATTENTION: Reset the flags because they didn't get cleared in 60 seconds...") - } - } - - // NATS Streaming guarantees At Least Once delivery, - // so need to check if the message is duplicate - if _, ok := msgHolder.smap.Load(event.ID()); ok { - log.Infow("ATTENTION: Duplicate delivered message detected", "message", m) - _ = m.Ack() - return - } - - // Clean up old messages before starting a new round - if msgHolder.lastMeetTime > 0 || msgHolder.latestGoodMsgTimestamp > 0 { - // ACK all the old messages after conditions meet - if m.Timestamp <= msgHolder.latestGoodMsgTimestamp { - if depName != "" { - msgHolder.reset(depName) - } - msgHolder.ackAndCache(m, event.ID()) - return - } - return - } - - now := time.Now().Unix() - - // Start a new round - if existingMsg, ok := msgHolder.msgs[depName]; ok { - if m.Timestamp == existingMsg.timestamp { - // Re-delivered latest messge, update delivery timestamp and return - existingMsg.lastDeliveredTime = now - msgHolder.msgs[depName] = existingMsg - return - } else if m.Timestamp < existingMsg.timestamp { - // Re-delivered old message, ack and return - msgHolder.ackAndCache(m, event.ID()) - log.Debugw("Dropping this message because later ones also satisfy", "eventID", event.ID()) - return - } - } - // New message, set and check - msgHolder.msgs[depName] = &eventSourceMessage{seq: m.Sequence, timestamp: m.Timestamp, event: event, lastDeliveredTime: now} - msgHolder.parameters[depName] = true - - // Check if there's any stale message being held. - // Stale message could be message age has been longer than NATS streaming max message age, - // which means it has ben deleted from NATS server side, but it's still held here. - // Use last delivery timestamp to determine that. - hasStale := false - for k, v := range msgHolder.msgs { - // Since the message is not acked, the server will keep re-sending it. - // If a message being held didn't get re-delivered in the last 10 minutes, treat it as stale. - if (now - v.lastDeliveredTime) > 10*60 { - msgHolder.reset(k) - hasStale = true - } - } - if hasStale { - return - } - - result, err := msgHolder.expr.Evaluate(msgHolder.parameters) - if err != nil { - log.Errorf("failed to evaluate dependency expression: %v", err) - // TODO: how to handle this situation? 
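Both this deleted NATS Streaming driver and the new JetStream trigger connection added later in this diff evaluate the dependency expression the same way: via govaluate, with each dependency name bound to a boolean parameter. A self-contained sketch of that mechanism, with a made-up expression and parameter values:

package main

import (
	"fmt"
	"strings"

	"github.com/Knetic/govaluate"
)

func main() {
	// Dependency names may contain "-", which govaluate would read as subtraction,
	// so it is escaped first (the same trick the driver uses).
	exprStr := strings.ReplaceAll("(dep-1 || dep2) && dep3", "-", "\\-")
	expr, err := govaluate.NewEvaluableExpression(exprStr)
	if err != nil {
		panic(err)
	}
	// true = dependency received, false = still waiting.
	params := map[string]interface{}{"dep-1": true, "dep2": false, "dep3": true}
	result, err := expr.Evaluate(params)
	if err != nil {
		panic(err)
	}
	fmt.Println(result) // true: dep-1 and dep3 satisfy the expression
}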
- return - } - if result != true { - return - } - msgHolder.latestGoodMsgTimestamp = m.Timestamp - msgHolder.lastMeetTime = time.Now().Unix() - // Trigger actions - messages := make(map[string]cloudevents.Event) - for k, v := range msgHolder.msgs { - messages[k] = *v.event - } - log.Debugf("Triggering actions for client %s", n.clientID) - - go action(messages) - - msgHolder.reset(depName) - msgHolder.ackAndCache(m, event.ID()) -} - -// eventSourceMessage is used by messageHolder to hold the latest message -type eventSourceMessage struct { - seq uint64 - timestamp int64 - event *cloudevents.Event - // timestamp of last delivered - lastDeliveredTime int64 -} - -// eventSourceMessageHolder is a struct used to hold the message information of subscribed dependencies -type eventSourceMessageHolder struct { - // time that all conditions meet - lastMeetTime int64 - // timestamp of last msg when all the conditions meet - latestGoodMsgTimestamp int64 - expr *govaluate.EvaluableExpression - depNames []string - // Mapping of [eventSourceName + eventName]dependencyName - sourceDepMap map[string]string - parameters map[string]interface{} - msgs map[string]*eventSourceMessage - // A sync map used to cache the message IDs, it is used to guarantee Exact Once triggering - smap *sync.Map -} - -func newEventSourceMessageHolder(dependencyExpr string, dependencies []Dependency) (*eventSourceMessageHolder, error) { - dependencyExpr = strings.ReplaceAll(dependencyExpr, "-", "\\-") - expression, err := govaluate.NewEvaluableExpression(dependencyExpr) - if err != nil { - return nil, err - } - deps := unique(expression.Vars()) - if len(dependencyExpr) == 0 { - return nil, errors.Errorf("no dependencies found: %s", dependencyExpr) - } - - srcDepMap := make(map[string]string) - for _, d := range dependencies { - key := d.EventSourceName + "__" + d.EventName - srcDepMap[key] = d.Name - } - - parameters := make(map[string]interface{}, len(deps)) - msgs := make(map[string]*eventSourceMessage) - for _, dep := range deps { - parameters[dep] = false - } - - return &eventSourceMessageHolder{ - lastMeetTime: int64(0), - latestGoodMsgTimestamp: int64(0), - expr: expression, - depNames: deps, - sourceDepMap: srcDepMap, - parameters: parameters, - msgs: msgs, - smap: new(sync.Map), - }, nil -} - -func (mh *eventSourceMessageHolder) getDependencyName(eventSourceName, eventName string) (string, error) { - for k, v := range mh.sourceDepMap { - sourceGlob, err := glob.Compile(k) - if err != nil { - return "", err - } - if sourceGlob.Match(eventSourceName + "__" + eventName) { - return v, nil - } - } - return "", nil -} - -// Ack the stan message and cache the ID to make sure Exact Once triggering -func (mh *eventSourceMessageHolder) ackAndCache(m *stan.Msg, id string) { - _ = m.Ack() - mh.smap.Store(id, time.Now().UnixNano()) -} - -// Reset the parameter and message that a dependency holds -func (mh *eventSourceMessageHolder) reset(depName string) { - mh.parameters[depName] = false - delete(mh.msgs, depName) - if mh.isCleanedUp() { - mh.lastMeetTime = 0 - mh.latestGoodMsgTimestamp = 0 - } -} - -func (mh *eventSourceMessageHolder) resetAll() { - for k := range mh.msgs { - delete(mh.msgs, k) - } - for k := range mh.parameters { - mh.parameters[k] = false - } - mh.lastMeetTime = 0 - mh.latestGoodMsgTimestamp = 0 -} - -// Check if all the parameters and messages have been cleaned up -func (mh *eventSourceMessageHolder) isCleanedUp() bool { - for _, v := range mh.parameters { - if v == true { - return false - } - } - return len(mh.msgs) 
== 0 -} - -func unique(stringSlice []string) []string { - if len(stringSlice) == 0 { - return stringSlice - } - keys := make(map[string]bool) - list := []string{} - for _, entry := range stringSlice { - if _, value := keys[entry]; !value { - keys[entry] = true - list = append(list, entry) - } - } - return list -} diff --git a/eventbus/driver_test.go b/eventbus/driver_test.go new file mode 100644 index 0000000000..b07f8af682 --- /dev/null +++ b/eventbus/driver_test.go @@ -0,0 +1,92 @@ +package eventbus + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + + eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + testSensorName = "sensor-xxxxx" + testEventSourceName = "es-xxxxx" + testSubject = "subj-xxxxx" + testHostname = "sensor-xxxxx-xxxxx" +) + +var ( + testBadBusConfig = eventbusv1alpha1.BusConfig{} + + testValidSensorSpec = &v1alpha1.Sensor{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{Name: testSensorName}, + Spec: v1alpha1.SensorSpec{}, + Status: v1alpha1.SensorStatus{}, + } + + testNoNameSensorSpec = &v1alpha1.Sensor{ + TypeMeta: metav1.TypeMeta{}, + ObjectMeta: metav1.ObjectMeta{}, + Spec: v1alpha1.SensorSpec{}, + Status: v1alpha1.SensorStatus{}, + } + + testClusterID = "test" + testBusConfig = eventbusv1alpha1.BusConfig{ + NATS: &eventbusv1alpha1.NATSConfig{ + URL: "nats://test:4222", + ClusterID: &testClusterID, + Auth: &eventbusv1alpha1.AuthStrategyNone, + }, + } +) + +func TestGetSensorDriver(t *testing.T) { + t.Run("get driver without eventbus", func(t *testing.T) { + _, err := GetSensorDriver(context.Background(), testBadBusConfig, testValidSensorSpec, testHostname) + assert.Error(t, err) + }) + + t.Run("get driver with none auth eventbus", func(t *testing.T) { + driver, err := GetSensorDriver(context.Background(), testBusConfig, testValidSensorSpec, testHostname) + assert.NoError(t, err) + assert.NotNil(t, driver) + }) + + t.Run("get driver with invalid sensor spec", func(t *testing.T) { + _, err := GetSensorDriver(context.Background(), testBusConfig, testNoNameSensorSpec, testHostname) + assert.Error(t, err) + }) + + t.Run("get driver with nil sensor spec", func(t *testing.T) { + _, err := GetSensorDriver(context.Background(), testBusConfig, nil, testHostname) + assert.Error(t, err) + }) +} + +func TestGetSourceDriver(t *testing.T) { + t.Run("get driver without eventbus", func(t *testing.T) { + _, err := GetEventSourceDriver(context.Background(), testBadBusConfig, testEventSourceName, testSubject) + assert.Error(t, err) + }) + + t.Run("get driver with none auth eventbus", func(t *testing.T) { + driver, err := GetEventSourceDriver(context.Background(), testBusConfig, testEventSourceName, testSubject) + assert.NoError(t, err) + assert.NotNil(t, driver) + }) + + t.Run("get driver without eventSourceName", func(t *testing.T) { + _, err := GetEventSourceDriver(context.Background(), testBusConfig, "", testSubject) + assert.Error(t, err) + }) + + t.Run("get NATS Streaming driver without subject", func(t *testing.T) { + _, err := GetEventSourceDriver(context.Background(), testBusConfig, testEventSourceName, "") + assert.Error(t, err) + }) +} diff --git a/eventbus/eventbus.go b/eventbus/eventbus.go deleted file mode 100644 index 8518df230e..0000000000 --- a/eventbus/eventbus.go +++ /dev/null @@ -1,71 +0,0 @@ -package eventbus - -import ( - "context" - - "github.com/fsnotify/fsnotify" - 
"github.com/pkg/errors" - "github.com/spf13/viper" - "go.uber.org/zap" - - "github.com/argoproj/argo-events/common" - "github.com/argoproj/argo-events/common/logging" - "github.com/argoproj/argo-events/eventbus/driver" - apicommon "github.com/argoproj/argo-events/pkg/apis/common" - eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" -) - -// GetDriver returns a Driver implementation -func GetDriver(ctx context.Context, eventBusConfig eventbusv1alpha1.BusConfig, subject, clientID string) (driver.Driver, error) { - logger := logging.FromContext(ctx) - var eventBusType apicommon.EventBusType - var eventBusAuth *eventbusv1alpha1.AuthStrategy - if eventBusConfig.NATS != nil { - eventBusType = apicommon.EventBusNATS - eventBusAuth = eventBusConfig.NATS.Auth - } else { - return nil, errors.New("invalid event bus") - } - var auth *driver.Auth - cred := &driver.AuthCredential{} - if eventBusAuth == nil || *eventBusAuth == eventbusv1alpha1.AuthStrategyNone { - auth = &driver.Auth{ - Strategy: eventbusv1alpha1.AuthStrategyNone, - } - } else { - v := viper.New() - v.SetConfigName("auth") - v.SetConfigType("yaml") - v.AddConfigPath(common.EventBusAuthFileMountPath) - err := v.ReadInConfig() - if err != nil { - return nil, errors.Errorf("failed to load auth.yaml. err: %+v", err) - } - err = v.Unmarshal(cred) - if err != nil { - logger.Errorw("failed to unmarshal auth.yaml", zap.Error(err)) - return nil, err - } - v.WatchConfig() - v.OnConfigChange(func(e fsnotify.Event) { - logger.Info("eventbus auth config file changed.") - err = v.Unmarshal(cred) - if err != nil { - logger.Errorw("failed to unmarshal auth.yaml after reloading", zap.Error(err)) - } - }) - auth = &driver.Auth{ - Strategy: *eventBusAuth, - Crendential: cred, - } - } - - var dvr driver.Driver - switch eventBusType { - case apicommon.EventBusNATS: - dvr = driver.NewNATSStreaming(eventBusConfig.NATS.URL, *eventBusConfig.NATS.ClusterID, subject, clientID, auth, logger) - default: - return nil, errors.New("invalid eventbus type") - } - return dvr, nil -} diff --git a/eventbus/eventbus_test.go b/eventbus/eventbus_test.go deleted file mode 100644 index 9e10da60d0..0000000000 --- a/eventbus/eventbus_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package eventbus - -import ( - "context" - "testing" - - "github.com/stretchr/testify/assert" - - eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" -) - -const ( - testSubject = "subject" - testClientID = "client-xxxxx" -) - -var ( - testBadBusConfig = eventbusv1alpha1.BusConfig{} - - testClusterID = "test" - testBusConfig = eventbusv1alpha1.BusConfig{ - NATS: &eventbusv1alpha1.NATSConfig{ - URL: "nats://test:4222", - ClusterID: &testClusterID, - Auth: &eventbusv1alpha1.AuthStrategyNone, - }, - } -) - -func TestGetDriver(t *testing.T) { - t.Run("get driver without eventbus", func(t *testing.T) { - _, err := GetDriver(context.Background(), testBadBusConfig, testSubject, testClientID) - assert.Error(t, err) - }) - - t.Run("get driver with none auth eventbus", func(t *testing.T) { - driver, err := GetDriver(context.Background(), testBusConfig, testSubject, testClientID) - assert.NoError(t, err) - assert.NotNil(t, driver) - }) -} diff --git a/eventbus/jetstream/base/jetstream.go b/eventbus/jetstream/base/jetstream.go new file mode 100644 index 0000000000..3087830fe1 --- /dev/null +++ b/eventbus/jetstream/base/jetstream.go @@ -0,0 +1,160 @@ +package base + +import ( + "bytes" + "crypto/tls" + "fmt" + + "github.com/argoproj/argo-events/common" + eventbuscommon 
"github.com/argoproj/argo-events/eventbus/common" + eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" + nats "github.com/nats-io/nats.go" + "github.com/spf13/viper" + "go.uber.org/zap" +) + +type Jetstream struct { + url string + auth *eventbuscommon.Auth + + MgmtConnection JetstreamConnection + + streamSettings string + + Logger *zap.SugaredLogger +} + +func NewJetstream(url string, streamSettings string, auth *eventbuscommon.Auth, logger *zap.SugaredLogger) (*Jetstream, error) { + js := &Jetstream{ + url: url, + auth: auth, + Logger: logger, + streamSettings: streamSettings, + } + + return js, nil +} + +func (stream *Jetstream) Init() error { + mgmtConnection, err := stream.MakeConnection() + if err != nil { + errStr := fmt.Sprintf("error creating Management Connection for Jetstream stream %+v: %v", stream, err) + stream.Logger.Error(errStr) + return fmt.Errorf(errStr) + } + err = stream.CreateStream(mgmtConnection) + if err != nil { + stream.Logger.Errorw("Failed to create Stream", zap.Error(err)) + return err + } + stream.MgmtConnection = *mgmtConnection + + return nil +} + +func (stream *Jetstream) MakeConnection() (*JetstreamConnection, error) { + log := stream.Logger + conn := &JetstreamConnection{Logger: stream.Logger} + + opts := []nats.Option{ + // todo: try out Jetstream's auto-reconnection capability + nats.NoReconnect(), + nats.DisconnectErrHandler(func(nc *nats.Conn, err error) { + conn.NATSConnected = false + log.Errorw("NATS connection lost", zap.Error(err)) + }), + nats.ReconnectHandler(func(nnc *nats.Conn) { + conn.NATSConnected = true + log.Info("Reconnected to NATS server") + }), + nats.Secure(&tls.Config{ + InsecureSkipVerify: true, + }), + } + + switch stream.auth.Strategy { + case eventbusv1alpha1.AuthStrategyToken: + log.Info("NATS auth strategy: Token") + opts = append(opts, nats.Token(stream.auth.Credential.Token)) + case eventbusv1alpha1.AuthStrategyBasic: + log.Info("NATS auth strategy: Basic") + opts = append(opts, nats.UserInfo(stream.auth.Credential.Username, stream.auth.Credential.Password)) + case eventbusv1alpha1.AuthStrategyNone: + log.Info("NATS auth strategy: None") + default: + return nil, fmt.Errorf("unsupported auth strategy") + } + nc, err := nats.Connect(stream.url, opts...) 
+ if err != nil { + log.Errorw("Failed to connect to NATS server", zap.Error(err)) + return nil, err + } + conn.NATSConn = nc + conn.NATSConnected = true + + // Create JetStream Context + conn.JSContext, err = nc.JetStream() + if err != nil { + log.Errorw("Failed to get Jetstream context", zap.Error(err)) + return nil, err + } + + log.Info("Connected to NATS Jetstream server.") + return conn, nil +} + +func (stream *Jetstream) CreateStream(conn *JetstreamConnection) error { + if conn == nil { + return fmt.Errorf("Can't create Stream on nil connection") + } + var err error + + // before we add the Stream, let's first check that it doesn't already exist + streamInfo, err := conn.JSContext.StreamInfo(common.JetStreamStreamName) + if streamInfo != nil && err == nil { + stream.Logger.Infof("No need to create Stream '%s' as it already exists", common.JetStreamStreamName) + return nil + } + if err != nil && err != nats.ErrStreamNotFound { + stream.Logger.Warnf(`Error calling StreamInfo for Stream '%s' (this can happen if another Jetstream client + is trying to create the Stream at the same time): %v`, common.JetStreamStreamName, err) + } + + // unmarshal settings + v := viper.New() + v.SetConfigType("yaml") + if err := v.ReadConfig(bytes.NewBufferString(stream.streamSettings)); err != nil { + return err + } + + streamConfig := nats.StreamConfig{ + Name: common.JetStreamStreamName, + Subjects: []string{common.JetStreamStreamName + ".*.*"}, + Retention: nats.LimitsPolicy, + Discard: nats.DiscardOld, + MaxMsgs: v.GetInt64("maxMsgs"), + MaxAge: v.GetDuration("maxAge"), + MaxBytes: v.GetInt64("maxBytes"), + Storage: nats.FileStorage, + Replicas: v.GetInt("replicas"), + Duplicates: v.GetDuration("duplicates"), + } + stream.Logger.Infof("Will use this stream config:\n '%v'", streamConfig) + + connectErr := common.DoWithRetry(nil, func() error { // exponential backoff if it fails the first time + _, err = conn.JSContext.AddStream(&streamConfig) + if err != nil { + errStr := fmt.Sprintf(`Failed to add Jetstream stream '%s' for connection %+v: err=%v`, + common.JetStreamStreamName, conn, err) + return fmt.Errorf(errStr) + } else { + return nil + } + }) + if connectErr != nil { + return connectErr + } + + stream.Logger.Infof("Created Jetstream stream '%s' for connection %+v", common.JetStreamStreamName, conn) + return nil +} diff --git a/eventbus/jetstream/base/jetstream_conn.go b/eventbus/jetstream/base/jetstream_conn.go new file mode 100644 index 0000000000..5b0d8c7b6c --- /dev/null +++ b/eventbus/jetstream/base/jetstream_conn.go @@ -0,0 +1,31 @@ +package base + +import ( + "fmt" + + nats "github.com/nats-io/nats.go" + "go.uber.org/zap" +) + +type JetstreamConnection struct { + NATSConn *nats.Conn + JSContext nats.JetStreamContext + + NATSConnected bool + + Logger *zap.SugaredLogger +} + +func (jsc *JetstreamConnection) Close() error { + if jsc == nil { + return fmt.Errorf("can't close Jetstream connection, JetstreamConnection is nil") + } + if jsc.NATSConn != nil && jsc.NATSConn.IsConnected() { + jsc.NATSConn.Close() + } + return nil +} + +func (jsc *JetstreamConnection) IsClosed() bool { + return jsc == nil || jsc.NATSConn == nil || !jsc.NATSConnected || jsc.NATSConn.IsClosed() +} diff --git a/eventbus/jetstream/eventsource/source_conn.go b/eventbus/jetstream/eventsource/source_conn.go new file mode 100644 index 0000000000..5a0ebbda46 --- /dev/null +++ b/eventbus/jetstream/eventsource/source_conn.go @@ -0,0 +1,48 @@ +package eventsource + +import ( + "context" + "fmt" + + eventbuscommon
"github.com/argoproj/argo-events/eventbus/common" + jetstreambase "github.com/argoproj/argo-events/eventbus/jetstream/base" + nats "github.com/nats-io/nats.go" +) + +type JetstreamSourceConn struct { + *jetstreambase.JetstreamConnection + eventSourceName string +} + +func CreateJetstreamSourceConn(conn *jetstreambase.JetstreamConnection, eventSourceName string) *JetstreamSourceConn { + return &JetstreamSourceConn{ + conn, eventSourceName, + } +} + +func (jsc *JetstreamSourceConn) Publish(ctx context.Context, + msg eventbuscommon.Message) error { + if jsc == nil { + return fmt.Errorf("Publish() failed; JetstreamSourceConn is nil") + } + + // exactly once on the publishing side is done by assigning a "deduplication key" to the message + dedupKey := nats.MsgId(msg.ID) + + // derive subject from event source name and event name + subject := fmt.Sprintf("default.%s.%s", msg.EventSourceName, msg.EventName) + _, err := jsc.JSContext.Publish(subject, msg.Body, dedupKey) + jsc.Logger.Debugf("published message to subject %s", subject) + return err +} + +func (conn *JetstreamSourceConn) IsClosed() bool { + return conn == nil || conn.JetstreamConnection.IsClosed() +} + +func (conn *JetstreamSourceConn) Close() error { + if conn == nil { + return fmt.Errorf("can't close Jetstream source connection, JetstreamSourceConn is nil") + } + return conn.JetstreamConnection.Close() +} diff --git a/eventbus/jetstream/eventsource/source_jetstream.go b/eventbus/jetstream/eventsource/source_jetstream.go new file mode 100644 index 0000000000..8473929bf0 --- /dev/null +++ b/eventbus/jetstream/eventsource/source_jetstream.go @@ -0,0 +1,36 @@ +package eventsource + +import ( + eventbuscommon "github.com/argoproj/argo-events/eventbus/common" + jetstreambase "github.com/argoproj/argo-events/eventbus/jetstream/base" + "go.uber.org/zap" +) + +type SourceJetstream struct { + *jetstreambase.Jetstream + eventSourceName string +} + +func NewSourceJetstream(url, eventSourceName string, streamConfig string, auth *eventbuscommon.Auth, logger *zap.SugaredLogger) (*SourceJetstream, error) { + baseJetstream, err := jetstreambase.NewJetstream(url, streamConfig, auth, logger) + if err != nil { + return nil, err + } + return &SourceJetstream{ + baseJetstream, + eventSourceName, + }, nil +} + +func (n *SourceJetstream) Initialize() error { + return n.Init() // member of jetstreambase.Jetstream +} + +func (n *SourceJetstream) Connect(clientID string) (eventbuscommon.EventSourceConnection, error) { + conn, err := n.MakeConnection() + if err != nil { + return nil, err + } + + return CreateJetstreamSourceConn(conn, n.eventSourceName), nil +} diff --git a/eventbus/jetstream/eventsource/source_jetstream_test.go b/eventbus/jetstream/eventsource/source_jetstream_test.go new file mode 100644 index 0000000000..36ba2a4df2 --- /dev/null +++ b/eventbus/jetstream/eventsource/source_jetstream_test.go @@ -0,0 +1,52 @@ +package eventsource + +import ( + "testing" + + "github.com/argoproj/argo-events/eventbus/common" + "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" + "github.com/stretchr/testify/assert" + "go.uber.org/zap" +) + +const ( + testURL = "test-url" + testEventSource = "test-event-source-name" + testStreamConfig = "test-stream-config" +) + +func TestNewSourceJetstream(t *testing.T) { + logger := zap.NewExample().Sugar() + + auth := &common.Auth{} + sourceJetstream, err := NewSourceJetstream(testURL, testEventSource, testStreamConfig, auth, logger) + assert.NotNil(t, sourceJetstream) + assert.Nil(t, err) +} + +func 
TestSourceJetstream_Connect(t *testing.T) { + logger := zap.NewExample().Sugar() + + auth := &common.Auth{} + sourceJetstream, err := NewSourceJetstream(testURL, testEventSource, testStreamConfig, auth, logger) + assert.NotNil(t, sourceJetstream) + assert.Nil(t, err) + + conn, err := sourceJetstream.Connect("test-client-id") + assert.Nil(t, conn) + assert.NotNil(t, err) +} + +func TestSourceJetstream_Initialize_Failure(t *testing.T) { + logger := zap.NewExample().Sugar() + + auth := &common.Auth{ + Strategy: v1alpha1.AuthStrategyNone, + } + sourceJetstream, err := NewSourceJetstream(testURL, testEventSource, testStreamConfig, auth, logger) + assert.NotNil(t, sourceJetstream) + assert.Nil(t, err) + + err = sourceJetstream.Initialize() + assert.NotNil(t, err) +} diff --git a/eventbus/jetstream/sensor/sensor_jetstream.go b/eventbus/jetstream/sensor/sensor_jetstream.go new file mode 100644 index 0000000000..8897e2aacd --- /dev/null +++ b/eventbus/jetstream/sensor/sensor_jetstream.go @@ -0,0 +1,464 @@ +package sensor + +import ( + "context" + "fmt" + "strings" + "time" + + eventbuscommon "github.com/argoproj/argo-events/eventbus/common" + eventbusjetstreambase "github.com/argoproj/argo-events/eventbus/jetstream/base" + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" + nats "github.com/nats-io/nats.go" + "go.uber.org/zap" + + "encoding/json" + + "github.com/argoproj/argo-events/common" + cloudevents "github.com/cloudevents/sdk-go/v2" + hashstructure "github.com/mitchellh/hashstructure/v2" +) + +const ( + SensorNilError = "sensorSpec == nil??" +) + +type SensorJetstream struct { + *eventbusjetstreambase.Jetstream + + sensorName string + sensorSpec *v1alpha1.Sensor + keyValueStore nats.KeyValue +} + +func NewSensorJetstream(url string, sensorSpec *v1alpha1.Sensor, streamConfig string, auth *eventbuscommon.Auth, logger *zap.SugaredLogger) (*SensorJetstream, error) { + if sensorSpec == nil { + errStr := SensorNilError + logger.Errorf(errStr) + return nil, fmt.Errorf(errStr) + } + + baseJetstream, err := eventbusjetstreambase.NewJetstream(url, streamConfig, auth, logger) + if err != nil { + return nil, err + } + return &SensorJetstream{ + baseJetstream, + sensorSpec.Name, + sensorSpec, + nil}, nil +} + +func (stream *SensorJetstream) Initialize() error { + err := stream.Init() // member of jetstreambase.Jetstream + if err != nil { + return err + } + + // see if there's an existing one + stream.keyValueStore, _ = stream.MgmtConnection.JSContext.KeyValue(stream.sensorName) + if stream.keyValueStore == nil { + // create Key/Value store for this Sensor (seems to be okay to call this if it already exists) + stream.keyValueStore, err = stream.MgmtConnection.JSContext.CreateKeyValue(&nats.KeyValueConfig{Bucket: stream.sensorName}) + if err != nil { + errStr := fmt.Sprintf("failed to Create Key/Value Store for sensor %s, err: %v", stream.sensorName, err) + stream.Logger.Error(errStr) + return err + } + } else { + stream.Logger.Infof("found existing K/V store for sensor %s, using that", stream.sensorName) + } + stream.Logger.Infof("successfully created/located K/V store for sensor %s", stream.sensorName) + + // Here we can take the sensor specification and clean up the K/V store so as to remove any old + // Triggers for this Sensor that no longer exist and any old Dependencies (and also Drain any corresponding Connections) + err = stream.setStateToSpec(stream.sensorSpec) + return err +} + +func (stream *SensorJetstream) Connect(ctx context.Context, triggerName string, dependencyExpression string, 
deps []eventbuscommon.Dependency, atLeastOnce bool) (eventbuscommon.TriggerConnection, error) { + conn, err := stream.MakeConnection() + if err != nil { + return nil, err + } + + return NewJetstreamTriggerConn(conn, stream.sensorName, triggerName, dependencyExpression, deps) +} + +// Update the K/V store to reflect the current Spec: +// 1. save the current spec, including list of triggers, list of dependencies and how they're defined, and trigger expressions +// 2. selectively purge dependencies from the K/V store if either the Trigger no longer exists, +// the dependency definition has changed, or the trigger expression has changed +// 3. for each dependency purged, delete the associated consumer so no new data is sent there +func (stream *SensorJetstream) setStateToSpec(sensorSpec *v1alpha1.Sensor) error { + log := stream.Logger + if sensorSpec == nil { + errStr := SensorNilError + log.Error(errStr) + return fmt.Errorf(errStr) + } + + log.Infof("Comparing previous Spec stored in k/v store for sensor %s to new Spec", sensorSpec.Name) + + changedDeps, removedDeps, validDeps, err := stream.getChangedDeps(sensorSpec) + if err != nil { + return err + } + log.Infof("Comparison of previous dependencies definitions to current: changed=%v, removed=%v, still valid=%v", changedDeps, removedDeps, validDeps) + + changedTriggers, removedTriggers, validTriggers, err := stream.getChangedTriggers(sensorSpec) // this looks at the list of triggers as well as the dependency expression for each trigger + if err != nil { + return err + } + log.Infof("Comparison of previous trigger list to current: changed=%v, removed=%v, still valid=%v", changedTriggers, removedTriggers, validTriggers) + + // for all valid triggers, determine if changedDeps or removedDeps requires them to be deleted + changedPlusRemovedDeps := make([]string, 0, len(changedDeps)+len(removedDeps)) + changedPlusRemovedDeps = append(changedPlusRemovedDeps, changedDeps...) + changedPlusRemovedDeps = append(changedPlusRemovedDeps, removedDeps...) + for _, triggerName := range validTriggers { + _ = stream.purgeSelectedDepsForTrigger(triggerName, changedPlusRemovedDeps) + } + + // for all changedTriggers (which includes modified and deleted), purge their dependencies + changedPlusRemovedTriggers := make([]string, 0, len(changedTriggers)+len(removedTriggers)) + changedPlusRemovedTriggers = append(changedPlusRemovedTriggers, changedTriggers...) + changedPlusRemovedTriggers = append(changedPlusRemovedTriggers, removedTriggers...) 
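The comparison driving this purge logic is hash-based: getChangedDeps below hashes each dependency definition from the spec with hashstructure and compares it to the hash previously stored in the K/V store, so a changed definition shows up as a hash mismatch. A minimal sketch of that check; the struct and values are illustrative stand-ins for v1alpha1.EventDependency:

package main

import (
	"fmt"

	hashstructure "github.com/mitchellh/hashstructure/v2"
)

type dep struct { // stand-in for v1alpha1.EventDependency
	Name            string
	EventSourceName string
	EventName       string
}

func main() {
	stored := map[string]uint64{"dep1": 12345} // hashes loaded from the K/V store last time (hypothetical)
	current := dep{Name: "dep1", EventSourceName: "webhook", EventName: "example"}
	hash, err := hashstructure.Hash(current, hashstructure.FormatV2, nil)
	if err != nil {
		panic(err)
	}
	// Same decision setStateToSpec makes: equal hash = still valid,
	// different hash = changed (purge its consumer), absent from spec = removed.
	if old, ok := stored[current.Name]; !ok || old != hash {
		fmt.Printf("dependency %s is new or changed (hash %d); purge its old consumer\n", current.Name, hash)
	}
}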
+ for _, triggerName := range changedPlusRemovedTriggers { + _ = stream.purgeAllDepsForTrigger(triggerName) + } + + // save new spec + err = stream.saveSpec(sensorSpec, removedTriggers) + if err != nil { + return err + } + + return nil +} + +// purging a dependency means both removing it from the K/V store and deleting the associated Consumer so no more messages are sent to it +func (stream *SensorJetstream) purgeDependency(triggerName string, depName string) error { + // purge from Key/Value store first + key := getDependencyKey(triggerName, depName) + durableName := getDurableName(stream.sensorName, triggerName, depName) + stream.Logger.Debugf("purging dependency, including 1) key %s from the K/V store, and 2) durable consumer %s", key, durableName) + err := stream.keyValueStore.Delete(key) + if err != nil && err != nats.ErrKeyNotFound { // we sometimes call this for a trigger/dependency combination without knowing whether it actually exists, so + // we don't need to worry about the case where it doesn't + stream.Logger.Error(err) + return err + } + // then delete consumer + stream.Logger.Debugf("durable name for sensor='%s', trigger='%s', dep='%s': '%s'", stream.sensorName, triggerName, depName, durableName) + + _ = stream.MgmtConnection.JSContext.DeleteConsumer("default", durableName) // we sometimes call this for a trigger/dependency combination without knowing whether it actually exists, so + // we don't need to worry about the case where it doesn't + + return nil +} + +func (stream *SensorJetstream) saveSpec(sensorSpec *v1alpha1.Sensor, removedTriggers []string) error { + // remove the old triggers from the K/V store + for _, trigger := range removedTriggers { + key := getTriggerExpressionKey(trigger) + err := stream.keyValueStore.Delete(key) + if err != nil { + errStr := fmt.Sprintf("error deleting key %s: %v", key, err) + stream.Logger.Error(errStr) + return fmt.Errorf(errStr) + } + stream.Logger.Debugf("successfully removed Trigger expression at key %s", key) + } + + // save the dependency definitions + depMap := make(DependencyDefinitionValue) + for _, dep := range sensorSpec.Spec.Dependencies { + hash, err := hashstructure.Hash(dep, hashstructure.FormatV2, nil) + if err != nil { + errStr := fmt.Sprintf("failed to hash dependency %+v", dep) + stream.Logger.Errorf(errStr) + err = fmt.Errorf(errStr) + return err + } + depMap[dep.Name] = hash + } + err := stream.storeDependencyDefinitions(depMap) + if err != nil { + return err + } + + // save the list of Triggers + triggerList := make(TriggerValue, len(sensorSpec.Spec.Triggers)) + for i, trigger := range sensorSpec.Spec.Triggers { + triggerList[i] = trigger.Template.Name + } + err = stream.storeTriggerList(triggerList) + if err != nil { + return err + } + + // for each trigger, save its expression + for _, trigger := range sensorSpec.Spec.Triggers { + err := stream.storeTriggerExpression(trigger.Template.Name, trigger.Template.Conditions) + if err != nil { + return err + } + } + + return nil +} + +func (stream *SensorJetstream) getChangedTriggers(sensorSpec *v1alpha1.Sensor) (changedTriggers []string, removedTriggers []string, validTriggers []string, err error) { + if sensorSpec == nil { + errStr := SensorNilError + stream.Logger.Errorf(errStr) + err = fmt.Errorf(errStr) + return nil, nil, nil, err + } + + mappedSpecTriggers := make(map[string]v1alpha1.Trigger, len(sensorSpec.Spec.Triggers)) + for _, trigger := range sensorSpec.Spec.Triggers { + mappedSpecTriggers[trigger.Template.Name] = trigger + } + storedTriggers, err := stream.getTriggerList()
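These helpers all persist state through the same pattern: JSON-encode a value and Put/Get it under a well-known key in the sensor's JetStream K/V bucket (see getDependencyDefinitions and storeTriggerList below). A self-contained sketch of that round trip, assuming a JetStream-enabled NATS server at a hypothetical local URL and a hypothetical bucket name:

package main

import (
	"encoding/json"
	"fmt"

	nats "github.com/nats-io/nats.go"
)

func main() {
	nc, err := nats.Connect("nats://localhost:4222") // assumed reachable
	if err != nil {
		panic(err)
	}
	defer nc.Close()
	js, err := nc.JetStream()
	if err != nil {
		panic(err)
	}
	// One bucket per sensor, as in Initialize() above.
	kv, err := js.CreateKeyValue(&nats.KeyValueConfig{Bucket: "my-sensor"})
	if err != nil {
		panic(err)
	}
	// Store a trigger list the way storeTriggerList does: JSON under the "Triggers" key.
	b, _ := json.Marshal([]string{"trigger-a", "trigger-b"})
	if _, err := kv.Put("Triggers", b); err != nil {
		panic(err)
	}
	entry, err := kv.Get("Triggers")
	if err != nil {
		panic(err)
	}
	var triggers []string
	_ = json.Unmarshal(entry.Value(), &triggers)
	fmt.Println(triggers) // [trigger-a trigger-b]
}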
+ if err != nil { + return nil, nil, nil, err + } + for _, triggerName := range storedTriggers { + currTrigger, found := mappedSpecTriggers[triggerName] + if !found { + removedTriggers = append(removedTriggers, triggerName) + } else { + // is the trigger expression the same or different? + storedExpression, err := stream.getTriggerExpression(triggerName) + if err != nil { + return nil, nil, nil, err + } + if storedExpression == currTrigger.Template.Conditions { + validTriggers = append(validTriggers, triggerName) + } else { + changedTriggers = append(changedTriggers, triggerName) + } + } + } + return changedTriggers, removedTriggers, validTriggers, nil +} + +func (stream *SensorJetstream) getChangedDeps(sensorSpec *v1alpha1.Sensor) (changedDeps []string, removedDeps []string, validDeps []string, err error) { + if sensorSpec == nil { + errStr := SensorNilError + stream.Logger.Errorf(errStr) + err = fmt.Errorf(errStr) + return nil, nil, nil, err + } + + specDependencies := sensorSpec.Spec.Dependencies + mappedSpecDependencies := make(map[string]v1alpha1.EventDependency, len(specDependencies)) + for _, dep := range specDependencies { + mappedSpecDependencies[dep.Name] = dep + } + storedDependencies, err := stream.getDependencyDefinitions() + if err != nil { + return nil, nil, nil, err + } + for depName, hashedDep := range storedDependencies { + currDep, found := mappedSpecDependencies[depName] + if !found { + removedDeps = append(removedDeps, depName) + } else { + // is the dependency definition the same or different? + hash, err := hashstructure.Hash(currDep, hashstructure.FormatV2, nil) + if err != nil { + errStr := fmt.Sprintf("failed to hash dependency %+v", currDep) + stream.Logger.Errorf(errStr) + err = fmt.Errorf(errStr) + return nil, nil, nil, err + } + if hash == hashedDep { + validDeps = append(validDeps, depName) + } else { + changedDeps = append(changedDeps, depName) + } + } + } + + return changedDeps, removedDeps, validDeps, nil +} + +func (stream *SensorJetstream) getDependencyDefinitions() (DependencyDefinitionValue, error) { + depDefs, err := stream.keyValueStore.Get(DependencyDefsKey) + if err != nil { + if err == nats.ErrKeyNotFound { + return make(DependencyDefinitionValue), nil + } + errStr := fmt.Sprintf("error getting key %s: %v", DependencyDefsKey, err) + stream.Logger.Error(errStr) + return nil, fmt.Errorf(errStr) + } + stream.Logger.Debugf("Value of key %s: %s", DependencyDefsKey, string(depDefs.Value())) + + depDefMap := DependencyDefinitionValue{} + err = json.Unmarshal(depDefs.Value(), &depDefMap) + if err != nil { + errStr := fmt.Sprintf("error unmarshalling value %s of key %s: %v", string(depDefs.Value()), DependencyDefsKey, err) + stream.Logger.Error(errStr) + return nil, fmt.Errorf(errStr) + } + + return depDefMap, nil +} + +func (stream *SensorJetstream) storeDependencyDefinitions(depDef DependencyDefinitionValue) error { + bytes, err := json.Marshal(depDef) + if err != nil { + errStr := fmt.Sprintf("error marshalling %+v: %v", depDef, err) + stream.Logger.Error(errStr) + return fmt.Errorf(errStr) + } + _, err = stream.keyValueStore.Put(DependencyDefsKey, bytes) + if err != nil { + errStr := fmt.Sprintf("error storing %s under key %s: %v", string(bytes), DependencyDefsKey, err) + stream.Logger.Error(errStr) + return fmt.Errorf(errStr) + } + stream.Logger.Debugf("successfully stored dependency definition under key %s: %s", DependencyDefsKey, string(bytes)) + return nil +} + +func (stream *SensorJetstream) getTriggerList() (TriggerValue, error) { + 
triggerListJson, err := stream.keyValueStore.Get(TriggersKey) + if err != nil { + if err == nats.ErrKeyNotFound { + return make(TriggerValue, 0), nil + } + errStr := fmt.Sprintf("error getting key %s: %v", TriggersKey, err) + stream.Logger.Error(errStr) + return nil, fmt.Errorf(errStr) + } + stream.Logger.Debugf("Value of key %s: %s", TriggersKey, string(triggerListJson.Value())) + + triggerList := TriggerValue{} + err = json.Unmarshal(triggerListJson.Value(), &triggerList) + if err != nil { + errStr := fmt.Sprintf("error unmarshalling value %s of key %s: %v", string(triggerListJson.Value()), TriggersKey, err) + stream.Logger.Error(errStr) + return nil, fmt.Errorf(errStr) + } + + return triggerList, nil +} + +func (stream *SensorJetstream) storeTriggerList(triggerList TriggerValue) error { + bytes, err := json.Marshal(triggerList) + if err != nil { + errStr := fmt.Sprintf("error marshalling %+v: %v", triggerList, err) + stream.Logger.Error(errStr) + return fmt.Errorf(errStr) + } + _, err = stream.keyValueStore.Put(TriggersKey, bytes) + if err != nil { + errStr := fmt.Sprintf("error storing %s under key %s: %v", string(bytes), TriggersKey, err) + stream.Logger.Error(errStr) + return fmt.Errorf(errStr) + } + stream.Logger.Debugf("successfully stored trigger list under key %s: %s", TriggersKey, string(bytes)) + return nil +} + +func (stream *SensorJetstream) getTriggerExpression(triggerName string) (string, error) { + key := getTriggerExpressionKey(triggerName) + expr, err := stream.keyValueStore.Get(key) + if err != nil { + if err == nats.ErrKeyNotFound { + return "", nil + } + errStr := fmt.Sprintf("error getting key %s: %v", key, err) + stream.Logger.Error(errStr) + return "", fmt.Errorf(errStr) + } + stream.Logger.Debugf("Value of key %s: %s", key, string(expr.Value())) + + return string(expr.Value()), nil +} + +func (stream *SensorJetstream) storeTriggerExpression(triggerName string, conditionExpression string) error { + key := getTriggerExpressionKey(triggerName) + _, err := stream.keyValueStore.PutString(key, conditionExpression) + if err != nil { + errStr := fmt.Sprintf("error storing %s under key %s: %v", conditionExpression, key, err) + stream.Logger.Error(errStr) + return fmt.Errorf(errStr) + } + stream.Logger.Debugf("successfully stored trigger expression under key %s: %s", key, conditionExpression) + return nil +} + +func (stream *SensorJetstream) purgeSelectedDepsForTrigger(triggerName string, deps []string) error { + stream.Logger.Debugf("purging selected dependencies %v for trigger %s", deps, triggerName) + for _, dep := range deps { + err := stream.purgeDependency(triggerName, dep) // this will attempt a delete even if no such key exists for a particular trigger, but that's okay + if err != nil { + return err + } + } + return nil +} + +func (stream *SensorJetstream) purgeAllDepsForTrigger(triggerName string) error { + stream.Logger.Debugf("purging all dependencies for trigger %s", triggerName) + // use the stored trigger expression to determine which dependencies need to be purged + storedExpression, err := stream.getTriggerExpression(triggerName) + if err != nil { + return err + } + + // get the individual dependencies by removing the special characters + modExpr := strings.ReplaceAll(storedExpression, "&&", " ") + modExpr = strings.ReplaceAll(modExpr, "||", " ") + modExpr = strings.ReplaceAll(modExpr, "(", " ") + modExpr = strings.ReplaceAll(modExpr, ")", " ") + deps := strings.FieldsFunc(modExpr, func(r rune) bool { return r == ' ' }) + + for _, dep := range deps { + err 
:= stream.purgeDependency(triggerName, dep) + if err != nil { + return err + } + } + return nil +} + +// //////////////////////////////////////////////////////////////////////////////////////////////////// +// These are the Keys and methods to derive Keys for our K/V store +var ( + TriggersKey = "Triggers" + DependencyDefsKey = "Deps" +) + +func getDependencyKey(triggerName string, depName string) string { + return fmt.Sprintf("%s/%s", triggerName, depName) +} + +func getTriggerExpressionKey(triggerName string) string { + return fmt.Sprintf("%s/Expression", triggerName) +} + +// //////////////////////////////////////////////////////////////////////////////////////////////////// +// These are the structs representing Values in our K/V store +type DependencyDefinitionValue map[string]uint64 // value for DependencyDefsKey +type TriggerValue []string // value for TriggersKey + +// value for getDependencyKey() +type MsgInfo struct { + StreamSeq uint64 + ConsumerSeq uint64 + Timestamp time.Time + Event *cloudevents.Event +} + +func getDurableName(sensorName string, triggerName string, depName string) string { + hashKey := fmt.Sprintf("%s-%s-%s", sensorName, triggerName, depName) + hashVal := common.Hasher(hashKey) + return fmt.Sprintf("group-%s", hashVal) +} diff --git a/eventbus/jetstream/sensor/trigger_conn.go b/eventbus/jetstream/sensor/trigger_conn.go new file mode 100644 index 0000000000..bc97e3404f --- /dev/null +++ b/eventbus/jetstream/sensor/trigger_conn.go @@ -0,0 +1,566 @@ +package sensor + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "strings" + "sync" + "time" + + "github.com/Knetic/govaluate" + cloudevents "github.com/cloudevents/sdk-go/v2" + nats "github.com/nats-io/nats.go" + + eventbuscommon "github.com/argoproj/argo-events/eventbus/common" + jetstreambase "github.com/argoproj/argo-events/eventbus/jetstream/base" +) + +type JetstreamTriggerConn struct { + *jetstreambase.JetstreamConnection + sensorName string + triggerName string + keyValueStore nats.KeyValue + dependencyExpression string + requiresANDLogic bool + evaluableExpression *govaluate.EvaluableExpression + deps []eventbuscommon.Dependency + sourceDepMap map[string][]string // maps EventSource and EventName to dependency name + recentMsgsByID map[string]*msg // prevent re-processing the same message as before (map of msg ID to time) + recentMsgsByTime []*msg +} + +type msg struct { + time int64 + msgID string +} + +func NewJetstreamTriggerConn(conn *jetstreambase.JetstreamConnection, + sensorName string, + triggerName string, + dependencyExpression string, + deps []eventbuscommon.Dependency) (*JetstreamTriggerConn, error) { + var err error + + sourceDepMap := make(map[string][]string) + for _, d := range deps { + key := d.EventSourceName + "__" + d.EventName + _, found := sourceDepMap[key] + if !found { + sourceDepMap[key] = make([]string, 0) + } + sourceDepMap[key] = append(sourceDepMap[key], d.Name) + } + + connection := &JetstreamTriggerConn{ + JetstreamConnection: conn, + sensorName: sensorName, + triggerName: triggerName, + dependencyExpression: dependencyExpression, + requiresANDLogic: strings.Contains(dependencyExpression, "&"), + deps: deps, + sourceDepMap: sourceDepMap, + recentMsgsByID: make(map[string]*msg), + recentMsgsByTime: make([]*msg, 0)} + connection.Logger = connection.Logger.With("triggerName", connection.triggerName, "sensorName", connection.sensorName) + + connection.evaluableExpression, err = govaluate.NewEvaluableExpression(strings.ReplaceAll(dependencyExpression, "-", "\\-")) + if 
err != nil { + errStr := fmt.Sprintf("failed to evaluate expression %s: %v", dependencyExpression, err) + connection.Logger.Error(errStr) + return nil, fmt.Errorf(errStr) + } + + connection.keyValueStore, err = conn.JSContext.KeyValue(sensorName) + if err != nil { + errStr := fmt.Sprintf("failed to get K/V store for sensor %s: %v", sensorName, err) + connection.Logger.Error(errStr) + return nil, fmt.Errorf(errStr) + } + + connection.Logger.Infof("Successfully located K/V store for sensor %s", sensorName) + return connection, nil +} + +func (conn *JetstreamTriggerConn) IsClosed() bool { + return conn == nil || conn.JetstreamConnection.IsClosed() +} + +func (conn *JetstreamTriggerConn) Close() error { + if conn == nil { + return fmt.Errorf("can't close Jetstream trigger connection, JetstreamTriggerConn is nil") + } + return conn.JetstreamConnection.Close() +} + +func (conn *JetstreamTriggerConn) String() string { + if conn == nil { + return "" + } + return fmt.Sprintf("JetstreamTriggerConn{Sensor:%s,Trigger:%s}", conn.sensorName, conn.triggerName) +} + +func (conn *JetstreamTriggerConn) Subscribe(ctx context.Context, + closeCh <-chan struct{}, + resetConditionsCh <-chan struct{}, + lastResetTime time.Time, + transform func(depName string, event cloudevents.Event) (*cloudevents.Event, error), + filter func(string, cloudevents.Event) bool, + action func(map[string]cloudevents.Event), + defaultSubject *string) error { + if conn == nil { + return fmt.Errorf("Subscribe() failed; JetstreamTriggerConn is nil") + } + + var err error + log := conn.Logger + // derive subjects that we'll subscribe with using the dependencies passed in + subjects := make(map[string]eventbuscommon.Dependency) + for _, dep := range conn.deps { + subjects[fmt.Sprintf("default.%s.%s", dep.EventSourceName, dep.EventName)] = dep + } + + if !lastResetTime.IsZero() { + err = conn.clearAllDependencies(&lastResetTime) + if err != nil { + errStr := fmt.Sprintf("failed to clear all dependencies as a result of condition reset time; err=%v", err) + log.Error(errStr) + } + } + + ch := make(chan *nats.Msg) // channel with no buffer (I believe this should be okay - we will block writing messages to this channel while a message is still being processed but volume of messages shouldn't be so high as to cause a problem) + wg := sync.WaitGroup{} + processMsgsCloseCh := make(chan struct{}) + pullSubscribeCloseCh := make(map[string]chan struct{}, len(subjects)) + + subscriptions := make([]*nats.Subscription, len(subjects)) + subscriptionIndex := 0 + + // start the goroutines that will listen to the individual subscriptions + for subject, dependency := range subjects { + // set durable name separately for each subscription + durableName := getDurableName(conn.sensorName, conn.triggerName, dependency.Name) + + conn.Logger.Debugf("durable name for sensor='%s', trigger='%s', dep='%s': '%s'", conn.sensorName, conn.triggerName, dependency.Name, durableName) + log.Infof("Subscribing to subject %s with durable name %s", subject, durableName) + subscriptions[subscriptionIndex], err = conn.JSContext.PullSubscribe(subject, durableName, nats.AckExplicit(), nats.DeliverNew()) + if err != nil { + errorStr := fmt.Sprintf("Failed to subscribe to subject %s using group %s: %v", subject, durableName, err) + log.Error(errorStr) + return fmt.Errorf(errorStr) + } else { + log.Debugf("successfully subscribed to subject %s with durable name %s", subject, durableName) + } + + pullSubscribeCloseCh[subject] = make(chan struct{}) + go 
conn.pullSubscribe(subscriptions[subscriptionIndex], ch, pullSubscribeCloseCh[subject], &wg) + wg.Add(1) + log.Debug("adding 1 to WaitGroup (pullSubscribe)") + + subscriptionIndex++ + } + + // create a single goroutine that handles receiving messages to ensure that all of the processing occurs on that + // one goroutine and we don't need to worry about race conditions + go conn.processMsgs(ch, processMsgsCloseCh, resetConditionsCh, transform, filter, action, &wg) + wg.Add(1) + log.Debug("adding 1 to WaitGroup (processMsgs)") + + for { + select { + case <-ctx.Done(): + log.Info("exiting, closing connection...") + conn.shutdownSubscriptions(processMsgsCloseCh, pullSubscribeCloseCh, &wg) + return nil + case <-closeCh: + log.Info("closing connection...") + conn.shutdownSubscriptions(processMsgsCloseCh, pullSubscribeCloseCh, &wg) + return nil + } + } +} + +func (conn *JetstreamTriggerConn) shutdownSubscriptions(processMsgsCloseCh chan struct{}, pullSubscribeCloseCh map[string]chan struct{}, wg *sync.WaitGroup) { + processMsgsCloseCh <- struct{}{} + for _, ch := range pullSubscribeCloseCh { + ch <- struct{}{} + } + wg.Wait() + conn.NATSConn.Close() + conn.Logger.Debug("closed NATSConn") +} + +func (conn *JetstreamTriggerConn) pullSubscribe( + subscription *nats.Subscription, + msgChannel chan<- *nats.Msg, + closeCh <-chan struct{}, + wg *sync.WaitGroup) { + var previousErr error + var previousErrTime time.Time + + for { + // call Fetch with timeout + msgs, fetchErr := subscription.Fetch(1, nats.MaxWait(time.Second*1)) + if fetchErr != nil && !errors.Is(fetchErr, nats.ErrTimeout) { + if previousErr != fetchErr || time.Since(previousErrTime) > 10*time.Second { + // avoid log spew - only log error every 10 seconds + conn.Logger.Errorf("failed to fetch messages for subscription %+v, %v, previousErr=%v, previousErrTime=%v", subscription, fetchErr, previousErr, previousErrTime) + } + previousErr = fetchErr + previousErrTime = time.Now() + } + + // read from close channel but don't block if it's empty + select { + case <-closeCh: + wg.Done() + conn.Logger.Debug("wg.Done(): pullSubscribe") + conn.Logger.Infof("exiting pullSubscribe() for subscription %+v", subscription) + return + default: + } + if fetchErr != nil && !errors.Is(fetchErr, nats.ErrTimeout) { + continue + } + + // then push the msgs to the channel that the processMsgs goroutine consumes + for _, msg := range msgs { + msgChannel <- msg + } + } +} + +func (conn *JetstreamTriggerConn) processMsgs( + receiveChannel <-chan *nats.Msg, + closeCh <-chan struct{}, + resetConditionsCh <-chan struct{}, + transform func(depName string, event cloudevents.Event) (*cloudevents.Event, error), + filter func(string, cloudevents.Event) bool, + action func(map[string]cloudevents.Event), + wg *sync.WaitGroup) { + defer func() { + wg.Done() + conn.Logger.Debug("wg.Done(): processMsgs") + }() + + for { + select { + case msg := <-receiveChannel: + conn.processMsg(msg, transform, filter, action) + case <-resetConditionsCh: + conn.Logger.Info("reset conditions") + _ = conn.clearAllDependencies(nil) + case <-closeCh: + conn.Logger.Info("shutting down processMsgs routine") + return + } + } +} + +func (conn *JetstreamTriggerConn) processMsg( + m *nats.Msg, + transform func(depName string, event cloudevents.Event) (*cloudevents.Event, error), + filter func(string, cloudevents.Event) bool, + action func(map[string]cloudevents.Event)) { + meta, err := m.Metadata() + if err != nil { + conn.Logger.Errorf("can't get Metadata() for message %+v: %v", m, err) + } + + done := make(chan
+	done := make(chan bool)
+	go func() {
+		ticker := time.NewTicker(500 * time.Millisecond)
+		defer ticker.Stop()
+		for {
+			select {
+			case <-done:
+				err = m.AckSync()
+				if err != nil {
+					errStr := fmt.Sprintf("Error performing AckSync() on message: %v", err)
+					conn.Logger.Error(errStr)
+				}
+				conn.Logger.Debugf("acked message of Stream seq: %s:%d, Consumer seq: %s:%d", meta.Stream, meta.Sequence.Stream, meta.Consumer, meta.Sequence.Consumer)
+				return
+			case <-ticker.C:
+				err = m.InProgress()
+				if err != nil {
+					errStr := fmt.Sprintf("Error performing InProgress() on message: %v", err)
+					conn.Logger.Error(errStr)
+				}
+				conn.Logger.Debugf("InProgress message of Stream seq: %s:%d, Consumer seq: %s:%d", meta.Stream, meta.Sequence.Stream, meta.Consumer, meta.Sequence.Consumer)
+			}
+		}
+	}()
+
+	defer func() {
+		done <- true
+	}()
+
+	log := conn.Logger
+
+	var event *cloudevents.Event
+	if err := json.Unmarshal(m.Data, &event); err != nil {
+		log.Errorf("Failed to convert to a cloudevent, discarding it... err: %v", err)
+		return
+	}
+
+	// De-duplication
+	// On the off chance that we receive the same message twice, don't re-process it
+	_, alreadyReceived := conn.recentMsgsByID[event.ID()]
+	if alreadyReceived {
+		log.Debugf("already received message of ID %s, ignoring it", event.ID())
+		return
+	}
+
+	// get all dependencies for this Trigger that match
+	depNames, err := conn.getDependencyNames(event.Source(), event.Subject())
+	if err != nil || len(depNames) == 0 {
+		log.Errorf("Failed to get the dependency names, discarding it... err: %v", err)
+		return
+	}
+
+	log.Debugf("New incoming Event Source Message, dependency names=%s, Stream seq: %s:%d, Consumer seq: %s:%d",
+		depNames, meta.Stream, meta.Sequence.Stream, meta.Consumer, meta.Sequence.Consumer)
+
+	for _, depName := range depNames {
+		conn.processDependency(m, event, depName, transform, filter, action)
+	}
+
+	// Save the message for de-duplication purposes
+	conn.storeMessageID(event.ID())
+	conn.purgeOldMsgs()
+}
+
+func (conn *JetstreamTriggerConn) processDependency(
+	m *nats.Msg,
+	event *cloudevents.Event,
+	depName string,
+	transform func(depName string, event cloudevents.Event) (*cloudevents.Event, error),
+	filter func(string, cloudevents.Event) bool,
+	action func(map[string]cloudevents.Event)) {
+	log := conn.Logger
+	event, err := transform(depName, *event)
+	if err != nil {
+		log.Errorf("failed to apply event transformation: %v", err)
+		return
+	}
+
+	if !filter(depName, *event) {
+		// message is not of interest
+		log.Infof("not interested in dependency %s (didn't pass filter)", depName)
+		return
+	}
+
+	if !conn.requiresANDLogic {
+		// this is the simple case: we can just perform the trigger
+		messages := make(map[string]cloudevents.Event)
+		messages[depName] = *event
+		log.Infof("Triggering actions after receiving dependency %s", depName)
+
+		action(messages)
+	} else {
+		// check the Dependency expression (need to retrieve previous dependencies from the Key/Value store)
+		prevMsgs, err := conn.getSavedDependencies()
+		if err != nil {
+			return
+		}
+
+		// populate the 'parameters' map to indicate which dependencies have been received and which haven't
+		parameters := make(map[string]interface{}, len(conn.deps))
+		for _, dep := range conn.deps {
+			parameters[dep.Name] = false
+		}
+		for prevDep := range prevMsgs {
+			parameters[prevDep] = true
+		}
+		parameters[depName] = true
+		log.Infof("Current state of dependencies: %v", parameters)
+
+		// evaluate the dependency expression
+		result, err := conn.evaluableExpression.Evaluate(parameters)
+		if err != nil {
+			errStr := fmt.Sprintf("failed to evaluate dependency expression: %v", err)
+			log.Error(errStr)
+			return
+		}
+
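+		// for example, with the expression "(dep1 || dep2) && dep3" and parameters
+		// {dep1: true, dep2: false, dep3: false}, Evaluate returns false: dep3 must
+		// still arrive before the trigger can fire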
fmt.Sprintf("failed to evaluate dependency expression: %v", err) + log.Error(errStr) + return + } + + // if expression is true, trigger and clear the K/V store + // else save the new message in the K/V store + if result == true { + log.Debugf("dependency expression successfully evaluated to true: '%s'", conn.dependencyExpression) + + messages := make(map[string]cloudevents.Event, len(prevMsgs)+1) + for prevDep, msgInfo := range prevMsgs { + messages[prevDep] = *msgInfo.Event + } + messages[depName] = *event + log.Infof("Triggering actions after receiving dependency %s", depName) + + action(messages) + + _ = conn.clearAllDependencies(nil) + } else { + log.Debugf("dependency expression false: %s", conn.dependencyExpression) + msgMetadata, err := m.Metadata() + if err != nil { + errStr := fmt.Sprintf("message %+v is not a jetstream message???: %v", m, err) + log.Error(errStr) + return + } + _ = conn.saveDependency(depName, + MsgInfo{ + StreamSeq: msgMetadata.Sequence.Stream, + ConsumerSeq: msgMetadata.Sequence.Consumer, + Timestamp: msgMetadata.Timestamp, + Event: event}) + } + } +} + +func (conn *JetstreamTriggerConn) getSavedDependencies() (map[string]MsgInfo, error) { + // dependencies are formatted "//"" + prevMsgs := make(map[string]MsgInfo) + + // for each dependency that's in our dependency expression, look for it: + for _, dep := range conn.deps { + msgInfo, found, err := conn.getSavedDependency(dep.Name) + if err != nil { + return prevMsgs, err + } + if found { + prevMsgs[dep.Name] = msgInfo + } + } + + return prevMsgs, nil +} + +func (conn *JetstreamTriggerConn) getSavedDependency(depName string) (msg MsgInfo, found bool, err error) { + key := getDependencyKey(conn.triggerName, depName) + entry, err := conn.keyValueStore.Get(key) + if err == nil { + if entry != nil { + var msgInfo MsgInfo + err := json.Unmarshal(entry.Value(), &msgInfo) + if err != nil { + errStr := fmt.Sprintf("error unmarshalling value %s for key %s: %v", string(entry.Value()), key, err) + conn.Logger.Error(errStr) + return MsgInfo{}, true, fmt.Errorf(errStr) + } + return msgInfo, true, nil + } + } else if err != nats.ErrKeyNotFound { + return MsgInfo{}, false, err + } + + return MsgInfo{}, false, nil +} + +func (conn *JetstreamTriggerConn) saveDependency(depName string, msgInfo MsgInfo) error { + log := conn.Logger + jsonEncodedMsg, err := json.Marshal(msgInfo) + if err != nil { + errorStr := fmt.Sprintf("failed to convert msgInfo struct into JSON: %+v", msgInfo) + log.Error(errorStr) + return fmt.Errorf(errorStr) + } + key := getDependencyKey(conn.triggerName, depName) + + _, err = conn.keyValueStore.Put(key, jsonEncodedMsg) + if err != nil { + errorStr := fmt.Sprintf("failed to store dependency under key %s, value:%s: %+v", key, jsonEncodedMsg, err) + log.Error(errorStr) + return fmt.Errorf(errorStr) + } + + return nil +} + +func (conn *JetstreamTriggerConn) clearAllDependencies(beforeTimeOpt *time.Time) error { + for _, dep := range conn.deps { + if beforeTimeOpt != nil && !beforeTimeOpt.IsZero() { + err := conn.clearDependencyIfExistsBeforeTime(dep.Name, *beforeTimeOpt) + if err != nil { + return err + } + } else { + err := conn.clearDependencyIfExists(dep.Name) + if err != nil { + return err + } + } + } + return nil +} + +func (conn *JetstreamTriggerConn) clearDependencyIfExistsBeforeTime(depName string, beforeTime time.Time) error { + key := getDependencyKey(conn.triggerName, depName) + + // first get the value (if it exists) to determine if it occurred before or after the time in question + msgInfo, found, 
err := conn.getSavedDependency(depName) + if err != nil { + return err + } + if found { + // determine if the dependency is from before the time in question + if msgInfo.Timestamp.Before(beforeTime) { + conn.Logger.Debugf("clearing key %s from the K/V store since its message time %+v occurred before %+v; MsgInfo:%+v", + key, msgInfo.Timestamp.Local(), beforeTime.Local(), msgInfo) + err := conn.keyValueStore.Delete(key) + if err != nil && err != nats.ErrKeyNotFound { + conn.Logger.Error(err) + return err + } + } + } + + return nil +} + +func (conn *JetstreamTriggerConn) clearDependencyIfExists(depName string) error { + key := getDependencyKey(conn.triggerName, depName) + conn.Logger.Debugf("clearing key %s from the K/V store", key) + err := conn.keyValueStore.Delete(key) + if err != nil && err != nats.ErrKeyNotFound { + conn.Logger.Error(err) + return err + } + return nil +} + +func (conn *JetstreamTriggerConn) getDependencyNames(eventSourceName, eventName string) ([]string, error) { + deps, found := conn.sourceDepMap[eventSourceName+"__"+eventName] + if !found { + errStr := fmt.Sprintf("incoming event source and event not associated with any dependencies, event source=%s, event=%s", + eventSourceName, eventName) + conn.Logger.Error(errStr) + return nil, fmt.Errorf(errStr) + } + + return deps, nil +} + +// save the message in our recent messages list (for de-duplication purposes) +func (conn *JetstreamTriggerConn) storeMessageID(id string) { + now := time.Now().UnixNano() + saveMsg := &msg{msgID: id, time: now} + conn.recentMsgsByID[id] = saveMsg + conn.recentMsgsByTime = append(conn.recentMsgsByTime, saveMsg) +} + +func (conn *JetstreamTriggerConn) purgeOldMsgs() { + now := time.Now().UnixNano() + + // evict any old messages from our message cache + for _, msg := range conn.recentMsgsByTime { + if now-msg.time > 60*1000*1000*1000 { // older than 1 minute + conn.Logger.Debugf("deleting message %v from cache", *msg) + delete(conn.recentMsgsByID, msg.msgID) + conn.recentMsgsByTime = conn.recentMsgsByTime[1:] + } else { + break // these are ordered by time so we can break when we hit one that's still valid + } + } +} diff --git a/eventbus/kafka/base/kafka.go b/eventbus/kafka/base/kafka.go new file mode 100644 index 0000000000..b2abc11944 --- /dev/null +++ b/eventbus/kafka/base/kafka.go @@ -0,0 +1,112 @@ +package base + +import ( + "strings" + + "github.com/IBM/sarama" + "github.com/argoproj/argo-events/common" + eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" + "go.uber.org/zap" +) + +type Kafka struct { + Logger *zap.SugaredLogger + config *eventbusv1alpha1.KafkaBus +} + +func NewKafka(config *eventbusv1alpha1.KafkaBus, logger *zap.SugaredLogger) *Kafka { + // set defaults + if config.ConsumerGroup == nil { + config.ConsumerGroup = &eventbusv1alpha1.KafkaConsumerGroup{} + } + + return &Kafka{ + Logger: logger, + config: config, + } +} + +func (k *Kafka) Brokers() []string { + return strings.Split(k.config.URL, ",") +} + +func (k *Kafka) Config() (*sarama.Config, error) { + config := sarama.NewConfig() + + // consumer config + config.Consumer.IsolationLevel = sarama.ReadCommitted + config.Consumer.Offsets.AutoCommit.Enable = false + + switch k.config.ConsumerGroup.StartOldest { + case true: + config.Consumer.Offsets.Initial = sarama.OffsetOldest + case false: + config.Consumer.Offsets.Initial = sarama.OffsetNewest + } + + switch k.config.ConsumerGroup.RebalanceStrategy { + case "sticky": + config.Consumer.Group.Rebalance.GroupStrategies = 
[]sarama.BalanceStrategy{sarama.NewBalanceStrategySticky()} + case "roundrobin": + config.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.NewBalanceStrategyRoundRobin()} + default: + config.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.NewBalanceStrategyRange()} + } + + // producer config + config.Producer.Idempotent = true + config.Producer.RequiredAcks = sarama.WaitForAll + config.Net.MaxOpenRequests = 1 + + // common config + if k.config.Version != "" { + version, err := sarama.ParseKafkaVersion(k.config.Version) + if err != nil { + return nil, err + } + + config.Version = version + } + + // sasl + if k.config.SASL != nil { + config.Net.SASL.Enable = true + config.Net.SASL.Mechanism = sarama.SASLMechanism(k.config.SASL.GetMechanism()) + + switch config.Net.SASL.Mechanism { + case "SCRAM-SHA-512": + config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { + return &common.XDGSCRAMClient{HashGeneratorFcn: common.SHA512New} + } + case "SCRAM-SHA-256": + config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { + return &common.XDGSCRAMClient{HashGeneratorFcn: common.SHA256New} + } + } + + user, err := common.GetSecretFromVolume(k.config.SASL.UserSecret) + if err != nil { + return nil, err + } + config.Net.SASL.User = user + + password, err := common.GetSecretFromVolume(k.config.SASL.PasswordSecret) + if err != nil { + return nil, err + } + config.Net.SASL.Password = password + } + + // tls + if k.config.TLS != nil { + tls, err := common.GetTLSConfig(k.config.TLS) + if err != nil { + return nil, err + } + + config.Net.TLS.Config = tls + config.Net.TLS.Enable = true + } + + return config, nil +} diff --git a/eventbus/kafka/base/kafka_conn.go b/eventbus/kafka/base/kafka_conn.go new file mode 100644 index 0000000000..ea3072dba7 --- /dev/null +++ b/eventbus/kafka/base/kafka_conn.go @@ -0,0 +1,13 @@ +package base + +import "go.uber.org/zap" + +type KafkaConnection struct { + Logger *zap.SugaredLogger +} + +func NewKafkaConnection(logger *zap.SugaredLogger) *KafkaConnection { + return &KafkaConnection{ + Logger: logger, + } +} diff --git a/eventbus/kafka/base/kafka_test.go b/eventbus/kafka/base/kafka_test.go new file mode 100644 index 0000000000..06e70d751c --- /dev/null +++ b/eventbus/kafka/base/kafka_test.go @@ -0,0 +1,105 @@ +package base + +import ( + "testing" + + "github.com/IBM/sarama" + eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" + "github.com/stretchr/testify/assert" + "go.uber.org/zap" +) + +func TestBrokers(t *testing.T) { + config := &eventbusv1alpha1.KafkaBus{ + URL: "broker1:9092,broker2:9092", + } + + logger := zap.NewNop().Sugar() + kafka := NewKafka(config, logger) + + expectedBrokers := []string{"broker1:9092", "broker2:9092"} + actualBrokers := kafka.Brokers() + + assert.Equal(t, expectedBrokers, actualBrokers) +} + +func TestConfig(t *testing.T) { + config := &eventbusv1alpha1.KafkaBus{ + URL: "localhost:9092", + } + + logger := zap.NewNop().Sugar() + + kafka := NewKafka(config, logger) + + saramaConfig, err := kafka.Config() + + assert.NoError(t, err) + assert.NotNil(t, saramaConfig) + assert.Equal(t, sarama.OffsetNewest, saramaConfig.Consumer.Offsets.Initial) + assert.Equal(t, sarama.WaitForAll, saramaConfig.Producer.RequiredAcks) +} + +func TestConfig_StartOldest(t *testing.T) { + config := &eventbusv1alpha1.KafkaBus{ + URL: "localhost:9092", + ConsumerGroup: &eventbusv1alpha1.KafkaConsumerGroup{ + StartOldest: true, + }, + } + + logger := 
zap.NewNop().Sugar() + + kafka := NewKafka(config, logger) + + saramaConfig, err := kafka.Config() + + assert.NoError(t, err) + assert.NotNil(t, saramaConfig) + assert.Equal(t, sarama.OffsetOldest, saramaConfig.Consumer.Offsets.Initial) +} + +func TestConfig_NoSASL(t *testing.T) { + config := &eventbusv1alpha1.KafkaBus{ + URL: "localhost:9092", + SASL: nil, + } + + logger := zap.NewNop().Sugar() + + kafka := NewKafka(config, logger) + + saramaConfig, err := kafka.Config() + + assert.NoError(t, err) + assert.NotNil(t, saramaConfig) + assert.False(t, saramaConfig.Net.SASL.Enable) +} + +func TestNewKafka(t *testing.T) { + config := &eventbusv1alpha1.KafkaBus{ + URL: "localhost:9092", + } + + logger := zap.NewNop().Sugar() + + kafka := NewKafka(config, logger) + + assert.NotNil(t, kafka) + assert.NotNil(t, kafka.Logger) + assert.NotNil(t, kafka.config) +} + +func TestNewKafka_EmptyURL(t *testing.T) { + config := &eventbusv1alpha1.KafkaBus{ + URL: "", + } + + logger := zap.NewNop().Sugar() + + kafka := NewKafka(config, logger) + + assert.NotNil(t, kafka) + assert.NotNil(t, kafka.Logger) + assert.NotNil(t, kafka.config) +} diff --git a/eventbus/kafka/base/utils.go b/eventbus/kafka/base/utils.go new file mode 100644 index 0000000000..e52490d51e --- /dev/null +++ b/eventbus/kafka/base/utils.go @@ -0,0 +1,51 @@ +package base + +import ( + "fmt" + "time" +) + +func EventKey(source string, subject string) string { + return fmt.Sprintf("%s.%s", source, subject) +} + +// Batch returns a read only channel that receives values from the +// input channel batched together into a slice. A value is sent to +// the output channel when the slice reaches n elements, or d time +// has elapsed, whichever happens first. Ordering is maintained. +func Batch[T any](n int, d time.Duration, in <-chan T) <-chan []T { + out := make(chan []T, 1) + + go func() { + batch := []T{} + timer := time.NewTimer(d) + timer.Stop() + + defer close(out) + defer timer.Stop() + + for { + select { + case item, ok := <-in: + if !ok { + return + } + if len(batch) == 0 { + timer.Reset(d) + } + if batch = append(batch, item); len(batch) == n { + timer.Stop() + out <- batch + batch = nil + } + case <-timer.C: + if len(batch) > 0 { + out <- batch + batch = nil + } + } + } + }() + + return out +} diff --git a/eventbus/kafka/base/utils_test.go b/eventbus/kafka/base/utils_test.go new file mode 100644 index 0000000000..be8359396b --- /dev/null +++ b/eventbus/kafka/base/utils_test.go @@ -0,0 +1,82 @@ +package base + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestBatchDurationReached(t *testing.T) { + in := make(chan int) + defer close(in) + + out := Batch(5, 1*time.Second, in) + + t0 := time.Now() + in <- 0 + assert.Equal(t, []int{0}, <-out) + assert.Equal(t, time.Second, time.Since(t0).Truncate(time.Second)) + + t1 := time.Now() + in <- 1 + in <- 2 + assert.Equal(t, []int{1, 2}, <-out) + assert.Equal(t, time.Second, time.Since(t1).Truncate(time.Second)) + + t2 := time.Now() + in <- 3 + in <- 4 + in <- 5 + assert.Equal(t, []int{3, 4, 5}, <-out) + assert.Equal(t, time.Second, time.Since(t2).Truncate(time.Second)) +} + +func TestBatchSizeReached(t *testing.T) { + in := make(chan int) + defer close(in) + + out := Batch(2, 1*time.Second, in) + + t0 := time.Now() + in <- 0 + in <- 1 + assert.Equal(t, <-out, []int{0, 1}) + assert.Equal(t, time.Duration(0), time.Since(t0).Truncate(time.Second)) + + t1 := time.Now() + in <- 2 + in <- 3 + in <- 4 + in <- 5 + assert.Equal(t, []int{2, 3}, <-out) + assert.Equal(t, 
[]int{4, 5}, <-out) + assert.Equal(t, time.Duration(0), time.Since(t1).Truncate(time.Second)) +} + +func TestBatchMaintainsOrder(t *testing.T) { + in := make(chan string) + defer close(in) + + out := Batch(10, 1*time.Second, in) + + in <- "a" + in <- "b" + in <- "c" + in <- "d" + in <- "e" + in <- "f" + in <- "g" + in <- "h" + in <- "i" + in <- "j" + assert.Equal(t, []string{"a", "b", "c", "d", "e", "f", "g", "h", "i", "j"}, <-out) +} + +func TestBatchChannelCleanedUp(t *testing.T) { + in := make(chan string) + out := Batch(10, 1*time.Second, in) + + close(in) + assert.Equal(t, []string(nil), <-out) +} diff --git a/eventbus/kafka/eventsource/source_conn.go b/eventbus/kafka/eventsource/source_conn.go new file mode 100644 index 0000000000..f9e79925d6 --- /dev/null +++ b/eventbus/kafka/eventsource/source_conn.go @@ -0,0 +1,55 @@ +package eventsource + +import ( + "context" + + "github.com/IBM/sarama" + "github.com/argoproj/argo-events/eventbus/common" + "github.com/argoproj/argo-events/eventbus/kafka/base" + "go.uber.org/zap" +) + +type KafkaSourceConnection struct { + *base.KafkaConnection + Topic string + Client sarama.Client + Producer sarama.SyncProducer +} + +func (c *KafkaSourceConnection) Publish(ctx context.Context, msg common.Message) error { + key := base.EventKey(msg.EventSourceName, msg.EventName) + partition, offset, err := c.Producer.SendMessage(&sarama.ProducerMessage{ + Topic: c.Topic, + Key: sarama.StringEncoder(key), + Value: sarama.ByteEncoder(msg.Body), + }) + + if err != nil { + // fail fast if topic does not exist + if err == sarama.ErrUnknownTopicOrPartition { + c.Logger.Fatalf( + "Topic does not exist. Please ensure the topic '%s' has been created, or the kafka setting '%s' is set to true.", + c.Topic, + "auto.create.topics.enable", + ) + } + + return err + } + + c.Logger.Infow("Published message to kafka", zap.String("topic", c.Topic), zap.String("key", key), zap.Int32("partition", partition), zap.Int64("offset", offset)) + + return nil +} + +func (c *KafkaSourceConnection) Close() error { + if err := c.Producer.Close(); err != nil { + return err + } + + return c.Client.Close() +} + +func (c *KafkaSourceConnection) IsClosed() bool { + return c.Client.Closed() +} diff --git a/eventbus/kafka/eventsource/source_kafka.go b/eventbus/kafka/eventsource/source_kafka.go new file mode 100644 index 0000000000..4b61562c2a --- /dev/null +++ b/eventbus/kafka/eventsource/source_kafka.go @@ -0,0 +1,55 @@ +package eventsource + +import ( + "github.com/IBM/sarama" + eventbuscommon "github.com/argoproj/argo-events/eventbus/common" + "github.com/argoproj/argo-events/eventbus/kafka/base" + eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" + "go.uber.org/zap" +) + +type KafkaSource struct { + *base.Kafka + topic string +} + +func NewKafkaSource(config *eventbusv1alpha1.KafkaBus, logger *zap.SugaredLogger) *KafkaSource { + return &KafkaSource{ + Kafka: base.NewKafka(config, logger), + topic: config.Topic, + } +} + +func (s *KafkaSource) Initialize() error { + return nil +} + +func (s *KafkaSource) Connect(string) (eventbuscommon.EventSourceConnection, error) { + config, err := s.Config() + if err != nil { + return nil, err + } + + // eventsource specific config + config.Producer.Return.Errors = true + config.Producer.Return.Successes = true + + client, err := sarama.NewClient(s.Brokers(), config) + if err != nil { + return nil, err + } + + producer, err := sarama.NewSyncProducerFromClient(client) + if err != nil { + return nil, err + } + + conn := 
&KafkaSourceConnection{ + KafkaConnection: base.NewKafkaConnection(s.Logger), + Topic: s.topic, + Client: client, + Producer: producer, + } + + return conn, nil +} diff --git a/eventbus/kafka/sensor/kafka_handler.go b/eventbus/kafka/sensor/kafka_handler.go new file mode 100644 index 0000000000..a14512c86f --- /dev/null +++ b/eventbus/kafka/sensor/kafka_handler.go @@ -0,0 +1,232 @@ +package kafka + +import ( + "encoding/json" + "fmt" + "sync" + "time" + + "github.com/IBM/sarama" + "github.com/argoproj/argo-events/eventbus/kafka/base" + "go.uber.org/zap" +) + +type KafkaHandler struct { + *sync.Mutex + Logger *zap.SugaredLogger + + // kafka details + GroupName string + Producer sarama.AsyncProducer + OffsetManager sarama.OffsetManager + TriggerTopic string + + // handler functions + // one function for each consumed topic, return messages, an + // offset and an optional function that will in a transaction + Handlers map[string]func(*sarama.ConsumerMessage) ([]*sarama.ProducerMessage, int64, func()) + + // cleanup function + // used to clear state when consumer group is rebalanced + Reset func() error + + // maintains a mapping of keys (which correspond to triggers) + // to offsets, used to ensure triggers aren't invoked twice + checkpoints Checkpoints +} + +type Checkpoints map[string]map[int32]*Checkpoint + +type Checkpoint struct { + Logger *zap.SugaredLogger + Init bool + Offsets map[string]int64 +} + +func (c *Checkpoint) Skip(key string, offset int64) bool { + if c.Offsets == nil { + return false + } + return offset < c.Offsets[key] +} + +func (c *Checkpoint) Set(key string, offset int64) { + if c.Offsets == nil { + c.Offsets = map[string]int64{} + } + c.Offsets[key] = offset +} + +func (c *Checkpoint) Metadata() string { + if c.Offsets == nil { + return "" + } + + metadata, err := json.Marshal(c.Offsets) + if err != nil { + c.Logger.Errorw("Failed to serialize metadata", err) + return "" + } + + return string(metadata) +} + +func (h *KafkaHandler) Setup(session sarama.ConsumerGroupSession) error { + h.Logger.Infow("Kafka setup", zap.Any("claims", session.Claims())) + + // instantiates checkpoints for all topic/partitions managed by + // this claim + h.checkpoints = Checkpoints{} + + for topic, partitions := range session.Claims() { + h.checkpoints[topic] = map[int32]*Checkpoint{} + + for _, partition := range partitions { + partitionOffsetManager, err := h.OffsetManager.ManagePartition(topic, partition) + if err != nil { + return err + } + + func() { + var offsets map[string]int64 + + defer partitionOffsetManager.AsyncClose() + offset, metadata := partitionOffsetManager.NextOffset() + + // only need to manage the offsets for each trigger + // with respect to the trigger topic + if topic == h.TriggerTopic && metadata != "" { + if err := json.Unmarshal([]byte(metadata), &offsets); err != nil { + // if metadata is invalid json, it will be + // reset to an empty map + h.Logger.Errorw("Failed to deserialize metadata, resetting", err) + } + } + + h.checkpoints[topic][partition] = &Checkpoint{ + Logger: h.Logger, + Init: offset == -1, // mark offset when first message consumed + Offsets: offsets, + } + }() + + h.OffsetManager.Commit() + if err := partitionOffsetManager.Close(); err != nil { + return err + } + } + } + + return nil +} + +func (h *KafkaHandler) Cleanup(session sarama.ConsumerGroupSession) error { + h.Logger.Infow("Kafka cleanup", zap.Any("claims", session.Claims())) + return h.Reset() +} + +func (h *KafkaHandler) ConsumeClaim(session sarama.ConsumerGroupSession, claim 
+func (h *KafkaHandler) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
+	handler, ok := h.Handlers[claim.Topic()]
+	if !ok {
+		return fmt.Errorf("unrecognized topic %s", claim.Topic())
+	}
+
+	checkpoint, ok := h.checkpoints[claim.Topic()][claim.Partition()]
+	if !ok {
+		return fmt.Errorf("unrecognized topic %s or partition %d", claim.Topic(), claim.Partition())
+	}
+
+	// Batch messages from the claim message channel. A message will
+	// be produced to the batched channel if the max batch size is
+	// reached or the time limit has elapsed, whichever happens
+	// first. Batching helps optimize kafka transactions.
+	batch := base.Batch(100, 1*time.Second, claim.Messages())
+
+	for {
+		select {
+		case msgs := <-batch:
+			if len(msgs) == 0 {
+				h.Logger.Warn("Kafka batch contains no messages")
+				continue
+			}
+
+			transaction := &KafkaTransaction{
+				Logger:        h.Logger,
+				Producer:      h.Producer,
+				GroupName:     h.GroupName,
+				Topic:         claim.Topic(),
+				Partition:     claim.Partition(),
+				ResetOffset:   msgs[0].Offset,
+				ResetMetadata: checkpoint.Metadata(),
+			}
+
+			var messages []*sarama.ProducerMessage
+			var offset int64
+			var fns []func()
+
+			for _, msg := range msgs {
+				key := string(msg.Key)
+
+				h.Logger.Infow("Received message",
+					zap.String("topic", msg.Topic),
+					zap.String("key", key),
+					zap.Int32("partition", msg.Partition),
+					zap.Int64("offset", msg.Offset))
+
+				if checkpoint.Init {
+					// mark the offset in order to reconsume from this
+					// offset if a restart occurs
+					session.MarkOffset(msg.Topic, msg.Partition, msg.Offset, "")
+					session.Commit()
+					checkpoint.Init = false
+				}
+
+				if checkpoint.Skip(key, msg.Offset) {
+					h.Logger.Infof("Skipping trigger '%s' (%d<%d)", key, msg.Offset, checkpoint.Offsets[key])
+					continue
+				}
+
+				m, o, f := handler(msg)
+				if msg.Topic == h.TriggerTopic && len(m) > 0 {
+					// when a trigger is invoked (there is a message)
+					// update the checkpoint to ensure the trigger
+					// is not re-invoked in the case of a restart
+					checkpoint.Set(key, msg.Offset+1)
+				}
+
+				// update transaction information
+				messages = append(messages, m...)
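+				// each handler returns the next offset to commit; messages
+				// are processed in order, so the last one in the batch wins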
+ offset = o + if f != nil { + fns = append(fns, f) + } + } + + func() { + h.Lock() + defer h.Unlock() + if err := transaction.Commit(session, messages, offset, checkpoint.Metadata()); err != nil { + h.Logger.Errorw("Transaction error", zap.Error(err)) + } + }() + + // invoke (action) functions asynchronously + for _, fn := range fns { + go fn() + } + case <-session.Context().Done(): + return nil + } + } +} + +func (h *KafkaHandler) Close() error { + h.Lock() + defer h.Unlock() + + if err := h.OffsetManager.Close(); err != nil { + return err + } + + return h.Producer.Close() +} diff --git a/eventbus/kafka/sensor/kafka_sensor.go b/eventbus/kafka/sensor/kafka_sensor.go new file mode 100644 index 0000000000..1f39403701 --- /dev/null +++ b/eventbus/kafka/sensor/kafka_sensor.go @@ -0,0 +1,385 @@ +package kafka + +import ( + "context" + "encoding/json" + "fmt" + "strings" + "sync" + "time" + + "github.com/IBM/sarama" + "github.com/Knetic/govaluate" + eventbuscommon "github.com/argoproj/argo-events/eventbus/common" + "github.com/argoproj/argo-events/eventbus/kafka/base" + eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" + sensorv1alpha1 "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" + cloudevents "github.com/cloudevents/sdk-go/v2" + "go.uber.org/zap" +) + +type KafkaSensor struct { + *base.Kafka + *sync.Mutex + sensor *sensorv1alpha1.Sensor + + // kafka details + topics *Topics + client sarama.Client + consumer sarama.ConsumerGroup + hostname string + groupName string + + // triggers handlers + // holds the state of all sensor triggers + triggers Triggers + + // kafka handler + // handles consuming from kafka, offsets, and transactions + kafkaHandler *KafkaHandler + connected bool +} + +func NewKafkaSensor(kafkaConfig *eventbusv1alpha1.KafkaBus, sensor *sensorv1alpha1.Sensor, hostname string, logger *zap.SugaredLogger) *KafkaSensor { + topics := &Topics{ + event: kafkaConfig.Topic, + trigger: fmt.Sprintf("%s-%s-%s", kafkaConfig.Topic, sensor.Name, "trigger"), + action: fmt.Sprintf("%s-%s-%s", kafkaConfig.Topic, sensor.Name, "action"), + } + + var groupName string + if kafkaConfig.ConsumerGroup == nil || kafkaConfig.ConsumerGroup.GroupName == "" { + groupName = fmt.Sprintf("%s-%s", sensor.Namespace, sensor.Name) + } else { + groupName = kafkaConfig.ConsumerGroup.GroupName + } + + return &KafkaSensor{ + Kafka: base.NewKafka(kafkaConfig, logger), + Mutex: &sync.Mutex{}, + sensor: sensor, + topics: topics, + hostname: hostname, + groupName: groupName, + triggers: Triggers{}, + } +} + +type Topics struct { + event string + trigger string + action string +} + +func (t *Topics) List() []string { + return []string{t.event, t.trigger, t.action} +} + +type Triggers map[string]KafkaTriggerHandler + +type TriggerWithDepName struct { + KafkaTriggerHandler + depName string +} + +func (t Triggers) List(event *cloudevents.Event) []*TriggerWithDepName { + triggers := []*TriggerWithDepName{} + + for _, trigger := range t { + if depName, ok := trigger.DependsOn(event); ok { + triggers = append(triggers, &TriggerWithDepName{trigger, depName}) + } + } + + return triggers +} + +func (t Triggers) Ready() bool { + for _, trigger := range t { + if !trigger.Ready() { + return false + } + } + return true +} + +func (s *KafkaSensor) Initialize() error { + config, err := s.Config() + if err != nil { + return err + } + + // sensor specific config + config.Producer.Transaction.ID = s.hostname + + client, err := sarama.NewClient(s.Brokers(), config) + if err != nil { + return err + } + + 
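+	// the consumer group, transactional producer, and offset manager
+	// below all share the single client created above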
+	consumer, err := sarama.NewConsumerGroupFromClient(s.groupName, client)
+	if err != nil {
+		return err
+	}
+
+	producer, err := sarama.NewAsyncProducerFromClient(client)
+	if err != nil {
+		return err
+	}
+
+	offsetManager, err := sarama.NewOffsetManagerFromClient(s.groupName, client)
+	if err != nil {
+		return err
+	}
+
+	// the producer is at risk of deadlocking if the Errors channel isn't read
+	go func() {
+		for err := range producer.Errors() {
+			s.Logger.Errorw("Kafka producer error", zap.Error(err))
+		}
+	}()
+
+	s.client = client
+	s.consumer = consumer
+	s.kafkaHandler = &KafkaHandler{
+		Mutex:         &sync.Mutex{},
+		Logger:        s.Logger,
+		GroupName:     s.groupName,
+		Producer:      producer,
+		OffsetManager: offsetManager,
+		TriggerTopic:  s.topics.trigger,
+		Reset:         s.Reset,
+		Handlers: map[string]func(*sarama.ConsumerMessage) ([]*sarama.ProducerMessage, int64, func()){
+			s.topics.event:   s.Event,
+			s.topics.trigger: s.Trigger,
+			s.topics.action:  s.Action,
+		},
+	}
+
+	return nil
+}
+
+func (s *KafkaSensor) Connect(ctx context.Context, triggerName string, depExpression string, dependencies []eventbuscommon.Dependency, atLeastOnce bool) (eventbuscommon.TriggerConnection, error) {
+	s.Lock()
+	defer s.Unlock()
+
+	// connect only if disconnected; if the connection is ever lost
+	// the connected boolean will flip and the sensor listener will
+	// attempt to reconnect by invoking this function again
+	if !s.connected {
+		go s.Listen(ctx)
+		s.connected = true
+	}
+
+	if _, ok := s.triggers[triggerName]; !ok {
+		expr, err := govaluate.NewEvaluableExpression(strings.ReplaceAll(depExpression, "-", "\\-"))
+		if err != nil {
+			return nil, err
+		}
+
+		depMap := map[string]eventbuscommon.Dependency{}
+		for _, dep := range dependencies {
+			depMap[base.EventKey(dep.EventSourceName, dep.EventName)] = dep
+		}
+
+		s.triggers[triggerName] = &KafkaTriggerConnection{
+			KafkaConnection: base.NewKafkaConnection(s.Logger),
+			sensorName:      s.sensor.Name,
+			triggerName:     triggerName,
+			depExpression:   expr,
+			dependencies:    depMap,
+			atLeastOnce:     atLeastOnce,
+			close:           s.Close,
+			isClosed:        s.IsClosed,
+		}
+	}
+
+	return s.triggers[triggerName], nil
+}
+
+func (s *KafkaSensor) Listen(ctx context.Context) {
+	defer s.Disconnect()
+
+	for {
+		if len(s.triggers) != len(s.sensor.Spec.Triggers) || !s.triggers.Ready() {
+			s.Logger.Info("Not ready to consume, waiting...")
+			time.Sleep(3 * time.Second)
+			continue
+		}
+
+		s.Logger.Infow("Consuming", zap.Strings("topics", s.topics.List()), zap.String("group", s.groupName))
+
+		if err := s.consumer.Consume(ctx, s.topics.List(), s.kafkaHandler); err != nil {
+			// fail fast if the topics do not exist
+			if err == sarama.ErrUnknownTopicOrPartition {
+				s.Logger.Fatalf(
+					"Topics do not exist. Please ensure the topics '%s' have been created, or the kafka setting '%s' is set to true.",
+					s.topics.List(),
+					"auto.create.topics.enable",
+				)
+			}
+
+			s.Logger.Errorw("Failed to consume", zap.Error(err))
+			return
+		}
+
+		if err := ctx.Err(); err != nil {
+			s.Logger.Errorw("Kafka error", zap.Error(err))
+			return
+		}
+	}
+}
+
+func (s *KafkaSensor) Disconnect() {
+	s.Lock()
+	defer s.Unlock()
+
+	s.connected = false
+}
+
+func (s *KafkaSensor) Close() error {
+	s.Lock()
+	defer s.Unlock()
+
+	// protect against being called multiple times
+	if s.IsClosed() {
+		return nil
+	}
+
+	if err := s.consumer.Close(); err != nil {
+		return err
+	}
+
+	if err := s.kafkaHandler.Close(); err != nil {
+		return err
+	}
+
+	return s.client.Close()
+}
+
+func (s *KafkaSensor) IsClosed() bool {
+	return !s.connected || s.client.Closed()
+}
+
+func (s *KafkaSensor) Event(msg *sarama.ConsumerMessage) ([]*sarama.ProducerMessage, int64, func()) {
+	var event *cloudevents.Event
+	if err := json.Unmarshal(msg.Value, &event); err != nil {
+		s.Logger.Errorw("Failed to deserialize cloudevent, skipping", zap.Error(err))
+		return nil, msg.Offset + 1, nil
+	}
+
+	messages := []*sarama.ProducerMessage{}
+	for _, trigger := range s.triggers.List(event) {
+		event, err := trigger.Transform(trigger.depName, event)
+		if err != nil {
+			s.Logger.Errorw("Failed to transform cloudevent, skipping", zap.Error(err))
+			continue
+		}
+
+		if !trigger.Filter(trigger.depName, event) {
+			s.Logger.Debug("Filter condition not satisfied, skipping")
+			continue
+		}
+
+		// if the trigger only requires one message to be invoked we
+		// can skip ahead to the action topic, otherwise produce to
+		// the trigger topic
+		var data any
+		var topic string
+		if trigger.OneAndDone() {
+			data = []*cloudevents.Event{event}
+			topic = s.topics.action
+		} else {
+			data = event
+			topic = s.topics.trigger
+		}
+
+		value, err := json.Marshal(data)
+		if err != nil {
+			s.Logger.Errorw("Failed to serialize cloudevent, skipping", zap.Error(err))
+			continue
+		}
+
+		messages = append(messages, &sarama.ProducerMessage{
+			Topic: topic,
+			Key:   sarama.StringEncoder(trigger.Name()),
+			Value: sarama.ByteEncoder(value),
+		})
+	}
+
+	return messages, msg.Offset + 1, nil
+}
+
+func (s *KafkaSensor) Trigger(msg *sarama.ConsumerMessage) ([]*sarama.ProducerMessage, int64, func()) {
+	var event *cloudevents.Event
+	if err := json.Unmarshal(msg.Value, &event); err != nil {
+		// do not return here, as we still need to call trigger.Offset
+		// below to determine the current offset
+		s.Logger.Errorw("Failed to deserialize cloudevent, skipping", zap.Error(err))
+	}
+
+	messages := []*sarama.ProducerMessage{}
+	offset := msg.Offset + 1
+
+	// update the trigger with the new event and add any resulting
+	// action to the transaction messages
+	if trigger, ok := s.triggers[string(msg.Key)]; ok && event != nil {
+		func() {
+			events, err := trigger.Update(event, msg.Partition, msg.Offset, msg.Timestamp)
+			if err != nil {
+				s.Logger.Errorw("Failed to update trigger, skipping", zap.Error(err))
+				return
+			}
+
+			// no events, trigger not yet satisfied
+			if events == nil {
+				return
+			}
+
+			value, err := json.Marshal(events)
+			if err != nil {
+				s.Logger.Errorw("Failed to serialize cloudevent, skipping", zap.Error(err))
+				return
+			}
+
+			messages = append(messages, &sarama.ProducerMessage{
+				Topic: s.topics.action,
+				Key:   sarama.StringEncoder(trigger.Name()),
+				Value: sarama.ByteEncoder(value),
+			})
+		}()
+	}
+
+	// need to determine the smallest possible offset against all
+	// triggers, as other triggers may have 
messages that land on the + // same partition + for _, trigger := range s.triggers { + offset = trigger.Offset(msg.Partition, offset) + } + + return messages, offset, nil +} + +func (s *KafkaSensor) Action(msg *sarama.ConsumerMessage) ([]*sarama.ProducerMessage, int64, func()) { + var events []*cloudevents.Event + if err := json.Unmarshal(msg.Value, &events); err != nil { + s.Logger.Errorw("Failed to deserialize cloudevents, skipping", zap.Error(err)) + return nil, msg.Offset + 1, nil + } + + var f func() + if trigger, ok := s.triggers[string(msg.Key)]; ok { + f = trigger.Action(events) + } + + return nil, msg.Offset + 1, f +} + +func (s *KafkaSensor) Reset() error { + for _, trigger := range s.triggers { + trigger.Reset() + } + + return nil +} diff --git a/eventbus/kafka/sensor/kafka_transaction.go b/eventbus/kafka/sensor/kafka_transaction.go new file mode 100644 index 0000000000..fce906e376 --- /dev/null +++ b/eventbus/kafka/sensor/kafka_transaction.go @@ -0,0 +1,99 @@ +package kafka + +import ( + "github.com/IBM/sarama" + "go.uber.org/zap" +) + +type KafkaTransaction struct { + Logger *zap.SugaredLogger + + // kafka details + Producer sarama.AsyncProducer + GroupName string + Topic string + Partition int32 + + // used to reset the offset and metadata if transaction fails + ResetOffset int64 + ResetMetadata string +} + +func (t *KafkaTransaction) Commit(session sarama.ConsumerGroupSession, messages []*sarama.ProducerMessage, offset int64, metadata string) error { + // No need for a transaction if no messages, just update the + // offset and metadata + if len(messages) == 0 { + session.MarkOffset(t.Topic, t.Partition, offset, metadata) + session.Commit() + return nil + } + + t.Logger.Infow("Begin transaction", + zap.String("topic", t.Topic), + zap.Int32("partition", t.Partition), + zap.Int("messages", len(messages))) + + if err := t.Producer.BeginTxn(); err != nil { + return err + } + + for _, msg := range messages { + t.Producer.Input() <- msg + } + + offsets := map[string][]*sarama.PartitionOffsetMetadata{ + t.Topic: {{ + Partition: t.Partition, + Offset: offset, + Metadata: &metadata, + }}, + } + + if err := t.Producer.AddOffsetsToTxn(offsets, t.GroupName); err != nil { + t.Logger.Errorw("Kafka transaction error", zap.Error(err)) + t.handleTxnError(session, func() error { + return t.Producer.AddOffsetsToTxn(offsets, t.GroupName) + }) + } + + if err := t.Producer.CommitTxn(); err != nil { + t.Logger.Errorw("Kafka transaction error", zap.Error(err)) + t.handleTxnError(session, func() error { + return t.Producer.CommitTxn() + }) + } + + t.Logger.Infow("Finished transaction", + zap.String("topic", t.Topic), + zap.Int32("partition", t.Partition)) + + return nil +} + +func (t *KafkaTransaction) handleTxnError(session sarama.ConsumerGroupSession, defaulthandler func() error) { + for { + if t.Producer.TxnStatus()&sarama.ProducerTxnFlagFatalError != 0 { + // reset current consumer offset to retry consume this record + session.ResetOffset(t.Topic, t.Partition, t.ResetOffset, t.ResetMetadata) + // fatal error, need to restart + t.Logger.Fatal("Message consumer: t.Producer is in a fatal state.") + return + } + if t.Producer.TxnStatus()&sarama.ProducerTxnFlagAbortableError != 0 { + if err := t.Producer.AbortTxn(); err != nil { + t.Logger.Errorw("Message consumer: unable to abort transaction.", zap.Error(err)) + continue + } + // reset current consumer offset to retry consume this record + session.ResetOffset(t.Topic, t.Partition, t.ResetOffset, t.ResetMetadata) + // fatal error, need to restart + 
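+			// (the process exits so the container can restart with a
+			// clean transactional producer state)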
t.Logger.Fatal("Message consumer: t.Producer is in a fatal state, aborted transaction.") + return + } + + // attempt retry + if err := defaulthandler(); err == nil { + return + } + } +} diff --git a/eventbus/kafka/sensor/trigger_conn.go b/eventbus/kafka/sensor/trigger_conn.go new file mode 100644 index 0000000000..cacf973f69 --- /dev/null +++ b/eventbus/kafka/sensor/trigger_conn.go @@ -0,0 +1,94 @@ +package kafka + +import ( + "context" + "fmt" + "time" + + "github.com/Knetic/govaluate" + "github.com/argoproj/argo-events/eventbus/common" + "github.com/argoproj/argo-events/eventbus/kafka/base" + cloudevents "github.com/cloudevents/sdk-go/v2" +) + +type KafkaTriggerConnection struct { + *base.KafkaConnection + KafkaTriggerHandler + + sensorName string + triggerName string + depExpression *govaluate.EvaluableExpression + dependencies map[string]common.Dependency + atLeastOnce bool + + // functions + close func() error + isClosed func() bool + transform func(string, cloudevents.Event) (*cloudevents.Event, error) + filter func(string, cloudevents.Event) bool + action func(map[string]cloudevents.Event) + + // state + events []*eventWithMetadata + lastResetTime time.Time +} + +type eventWithMetadata struct { + *cloudevents.Event + partition int32 + offset int64 + timestamp time.Time +} + +func (e1 *eventWithMetadata) Same(e2 *eventWithMetadata) bool { + return e1.Source() == e2.Source() && e1.Subject() == e2.Subject() +} + +func (e *eventWithMetadata) After(t time.Time) bool { + return t.IsZero() || e.timestamp.After(t) +} + +func (c *KafkaTriggerConnection) String() string { + return fmt.Sprintf("KafkaTriggerConnection{Sensor:%s,Trigger:%s}", c.sensorName, c.triggerName) +} + +func (c *KafkaTriggerConnection) Close() error { + if c.close == nil { + return fmt.Errorf("can't close Kafka trigger connection, close function is nil") + } + return c.close() +} + +func (c *KafkaTriggerConnection) IsClosed() bool { + return c.isClosed == nil || c.isClosed() +} + +func (c *KafkaTriggerConnection) Subscribe( + ctx context.Context, + closeCh <-chan struct{}, + resetConditionsCh <-chan struct{}, + lastResetTime time.Time, + transform func(depName string, event cloudevents.Event) (*cloudevents.Event, error), + filter func(string, cloudevents.Event) bool, + action func(map[string]cloudevents.Event), + topic *string) error { + c.transform = transform + c.filter = filter + c.action = action + c.lastResetTime = lastResetTime + + for { + select { + case <-ctx.Done(): + return c.Close() + case <-closeCh: + // this is a noop since a kafka connection is maintained + // on the overall sensor vs indididual triggers + return nil + case <-resetConditionsCh: + // trigger update will filter out all events that occurred + // before this time + c.lastResetTime = time.Now() + } + } +} diff --git a/eventbus/kafka/sensor/trigger_handler.go b/eventbus/kafka/sensor/trigger_handler.go new file mode 100644 index 0000000000..c89f104daf --- /dev/null +++ b/eventbus/kafka/sensor/trigger_handler.go @@ -0,0 +1,156 @@ +package kafka + +import ( + "time" + + "github.com/Knetic/govaluate" + "github.com/argoproj/argo-events/eventbus/common" + "github.com/argoproj/argo-events/eventbus/kafka/base" + cloudevents "github.com/cloudevents/sdk-go/v2" + "go.uber.org/zap" +) + +type KafkaTriggerHandler interface { + common.TriggerConnection + Name() string + Ready() bool + Reset() + OneAndDone() bool + DependsOn(*cloudevents.Event) (string, bool) + Transform(string, *cloudevents.Event) (*cloudevents.Event, error) + Filter(string, *cloudevents.Event) 
bool + Update(event *cloudevents.Event, partition int32, offset int64, timestamp time.Time) ([]*cloudevents.Event, error) + Offset(int32, int64) int64 + Action([]*cloudevents.Event) func() +} + +func (c *KafkaTriggerConnection) Name() string { + return c.triggerName +} + +func (c *KafkaTriggerConnection) Ready() bool { + // cannot process events until the subscribe function has been + // called, which is when these functions are set + return c.transform != nil && c.filter != nil && c.action != nil +} + +func (c *KafkaTriggerConnection) DependsOn(event *cloudevents.Event) (string, bool) { + if dep, ok := c.dependencies[base.EventKey(event.Source(), event.Subject())]; ok { + return dep.Name, true + } + + return "", false +} + +func (c *KafkaTriggerConnection) OneAndDone() bool { + for _, token := range c.depExpression.Tokens() { + if token.Kind == govaluate.LOGICALOP && token.Value == "&&" { + return false + } + } + + return true +} + +func (c *KafkaTriggerConnection) Transform(depName string, event *cloudevents.Event) (*cloudevents.Event, error) { + return c.transform(depName, *event) +} + +func (c *KafkaTriggerConnection) Filter(depName string, event *cloudevents.Event) bool { + return c.filter(depName, *event) +} + +func (c *KafkaTriggerConnection) Update(event *cloudevents.Event, partition int32, offset int64, timestamp time.Time) ([]*cloudevents.Event, error) { + eventWithMetadata := &eventWithMetadata{ + Event: event, + partition: partition, + offset: offset, + timestamp: timestamp, + } + + // remove previous events with same source and subject and remove + // all events older than last condition reset time + i := 0 + for _, event := range c.events { + if !event.Same(eventWithMetadata) && event.After(c.lastResetTime) { + c.events[i] = event + i++ + } + } + for j := i; j < len(c.events); j++ { + c.events[j] = nil // avoid memory leak + } + c.events = append(c.events[:i], eventWithMetadata) + + satisfied, err := c.satisfied() + if err != nil { + return nil, err + } + + // if satisfied, publish a message to the action topic containing + // all events and reset the trigger + var events []*cloudevents.Event + if satisfied == true { + defer c.Reset() + for _, event := range c.events { + events = append(events, event.Event) + } + } + + return events, nil +} + +func (c *KafkaTriggerConnection) Offset(partition int32, offset int64) int64 { + for _, event := range c.events { + if partition == event.partition && offset > event.offset { + offset = event.offset + } + } + + return offset +} + +func (c *KafkaTriggerConnection) Action(events []*cloudevents.Event) func() { + eventMap := map[string]cloudevents.Event{} + for _, event := range events { + if depName, ok := c.DependsOn(event); ok { + eventMap[depName] = *event + } + } + + // If at least once is specified, we must call the action + // function before committing a transaction, otherwise the + // function must be called after. To call after we return a + // function. 
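+	// e.g. with atLeastOnce the action runs here, before the offsets
+	// are committed; otherwise the returned closure is invoked by
+	// ConsumeClaim after the transaction commits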
+	var f func()
+	if c.atLeastOnce {
+		c.action(eventMap)
+	} else {
+		f = func() { c.action(eventMap) }
+	}
+
+	return f
+}
+
+func (c *KafkaTriggerConnection) satisfied() (interface{}, error) {
+	parameters := Parameters{}
+	for _, event := range c.events {
+		if depName, ok := c.DependsOn(event.Event); ok {
+			parameters[depName] = true
+		}
+	}
+
+	c.Logger.Infow("Evaluating", zap.String("expr", c.depExpression.String()), zap.Any("parameters", parameters))
+
+	return c.depExpression.Eval(parameters)
+}
+
+func (c *KafkaTriggerConnection) Reset() {
+	c.events = nil
+}
+
+type Parameters map[string]bool
+
+func (p Parameters) Get(name string) (interface{}, error) {
+	return p[name], nil
+}
diff --git a/eventbus/stan/base/stan.go b/eventbus/stan/base/stan.go
new file mode 100644
index 0000000000..37cda0676c
--- /dev/null
+++ b/eventbus/stan/base/stan.go
@@ -0,0 +1,77 @@
+package base
+
+import (
+	"fmt"
+
+	eventbuscommon "github.com/argoproj/argo-events/eventbus/common"
+	eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1"
+	nats "github.com/nats-io/nats.go"
+	"github.com/nats-io/stan.go"
+	"go.uber.org/zap"
+)
+
+type STAN struct {
+	url       string
+	auth      *eventbuscommon.Auth
+	clusterID string
+
+	logger *zap.SugaredLogger
+}
+
+// NewSTAN returns a NATS Streaming driver
+func NewSTAN(url string, clusterID string, auth *eventbuscommon.Auth, logger *zap.SugaredLogger) *STAN {
+	return &STAN{
+		url:       url,
+		clusterID: clusterID,
+		auth:      auth,
+		logger:    logger,
+	}
+}
+
+func (n *STAN) MakeConnection(clientID string) (*STANConnection, error) {
+	log := n.logger.With("clientID", clientID)
+	conn := &STANConnection{ClientID: clientID, Logger: n.logger}
+	opts := []nats.Option{
+		// Do not reconnect here but handle reconnection outside
+		nats.NoReconnect(),
+		nats.DisconnectErrHandler(func(nc *nats.Conn, err error) {
+			conn.NATSConnected = false
+			log.Errorw("NATS connection lost", zap.Error(err))
+		}),
+		nats.ReconnectHandler(func(nnc *nats.Conn) {
+			conn.NATSConnected = true
+			log.Info("Reconnected to NATS server")
+		}),
+	}
+	switch n.auth.Strategy {
+	case eventbusv1alpha1.AuthStrategyToken:
+		log.Info("NATS auth strategy: Token")
+		opts = append(opts, nats.Token(n.auth.Credential.Token))
+	case eventbusv1alpha1.AuthStrategyNone:
+		log.Info("NATS auth strategy: None")
+	default:
+		return nil, fmt.Errorf("unsupported auth strategy")
+	}
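+	// connect the core NATS connection first; the streaming (STAN)
+	// connection below is layered on top of it via stan.NatsConn()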
+	nc, err := nats.Connect(n.url, opts...)
+	if err != nil {
+		log.Errorw("Failed to connect to NATS server", zap.Error(err))
+		return nil, err
+	}
+	log.Info("Connected to NATS server.")
+	conn.NATSConn = nc
+	conn.NATSConnected = true
+
+	sc, err := stan.Connect(n.clusterID, clientID, stan.NatsConn(nc), stan.Pings(5, 60),
+		stan.SetConnectionLostHandler(func(_ stan.Conn, reason error) {
+			conn.STANConnected = false
+			log.Errorw("NATS streaming connection lost", zap.Error(reason))
+		}))
+	if err != nil {
+		log.Errorw("Failed to connect to NATS streaming server", zap.Error(err))
+		return nil, err
+	}
+	log.Info("Connected to NATS streaming server.")
+	conn.STANConn = sc
+	conn.STANConnected = true
+	return conn, nil
+}
diff --git a/eventbus/stan/base/stan_conn.go b/eventbus/stan/base/stan_conn.go
new file mode 100644
index 0000000000..0a434413fb
--- /dev/null
+++ b/eventbus/stan/base/stan_conn.go
@@ -0,0 +1,42 @@
+package base
+
+import (
+	"fmt"
+
+	nats "github.com/nats-io/nats.go"
+	"github.com/nats-io/stan.go"
+	"go.uber.org/zap"
+)
+
+type STANConnection struct {
+	NATSConn *nats.Conn
+	STANConn stan.Conn
+
+	NATSConnected bool
+	STANConnected bool
+
+	// defaultSubject string
+	ClientID string
+
+	Logger *zap.SugaredLogger
+}
+
+func (nsc *STANConnection) Close() error {
+	if nsc == nil {
+		return fmt.Errorf("can't close STAN connection, STANConnection is nil")
+	}
+	if nsc.STANConn != nil {
+		err := nsc.STANConn.Close()
+		if err != nil {
+			return err
+		}
+	}
+	if nsc.NATSConn != nil && nsc.NATSConn.IsConnected() {
+		nsc.NATSConn.Close()
+	}
+	return nil
+}
+
+func (nsc *STANConnection) IsClosed() bool {
+	return nsc == nil || nsc.NATSConn == nil || nsc.STANConn == nil || !nsc.NATSConnected || !nsc.STANConnected || nsc.NATSConn.IsClosed()
+}
diff --git a/eventbus/stan/eventsource/source_conn.go b/eventbus/stan/eventsource/source_conn.go
new file mode 100644
index 0000000000..829d3fd79f
--- /dev/null
+++ b/eventbus/stan/eventsource/source_conn.go
@@ -0,0 +1,34 @@
+package eventsource
+
+import (
+	"context"
+	"fmt"
+
+	eventbuscommon "github.com/argoproj/argo-events/eventbus/common"
+	stanbase "github.com/argoproj/argo-events/eventbus/stan/base"
+)
+
+type STANSourceConn struct {
+	*stanbase.STANConnection
+	eventSourceName string
+	subject         string
+}
+
+func (n *STANSourceConn) Publish(ctx context.Context,
+	msg eventbuscommon.Message) error {
+	if n == nil {
+		return fmt.Errorf("Publish() failed; STANSourceConn is nil")
+	}
+	return n.STANConn.Publish(n.subject, msg.Body)
+}
+
+func (conn *STANSourceConn) IsClosed() bool {
+	return conn == nil || conn.STANConnection.IsClosed()
+}
+
+func (conn *STANSourceConn) Close() error {
+	if conn == nil {
+		return fmt.Errorf("can't close STAN source connection, STANSourceConn is nil")
+	}
+	return conn.STANConnection.Close()
+}
diff --git a/eventbus/stan/eventsource/source_stan.go b/eventbus/stan/eventsource/source_stan.go
new file mode 100644
index 0000000000..b845a025fe
--- /dev/null
+++ b/eventbus/stan/eventsource/source_stan.go
@@ -0,0 +1,34 @@
+package eventsource
+
+import (
+	eventbuscommon "github.com/argoproj/argo-events/eventbus/common"
+	stanbase "github.com/argoproj/argo-events/eventbus/stan/base"
+	"go.uber.org/zap"
+)
+
+type SourceSTAN struct {
+	*stanbase.STAN
+	eventSourceName string
+	subject         string
+}
+
+func NewSourceSTAN(url, clusterID, eventSourceName string, subject string, auth *eventbuscommon.Auth, logger *zap.SugaredLogger) *SourceSTAN {
+	return &SourceSTAN{
+		stanbase.NewSTAN(url, clusterID, auth, logger),
+		eventSourceName,
+		subject,
+	}
+}
+
+func (n 
*SourceSTAN) Initialize() error { + return nil +} + +func (n *SourceSTAN) Connect(clientID string) (eventbuscommon.EventSourceConnection, error) { + conn, err := n.MakeConnection(clientID) + if err != nil { + return nil, err + } + + return &STANSourceConn{conn, n.eventSourceName, n.subject}, nil +} diff --git a/eventbus/stan/sensor/sensor_stan.go b/eventbus/stan/sensor/sensor_stan.go new file mode 100644 index 0000000000..cc29b29910 --- /dev/null +++ b/eventbus/stan/sensor/sensor_stan.go @@ -0,0 +1,44 @@ +package sensor + +import ( + "context" + "crypto/rand" + "fmt" + "math/big" + + "github.com/argoproj/argo-events/common" + eventbuscommon "github.com/argoproj/argo-events/eventbus/common" + stanbase "github.com/argoproj/argo-events/eventbus/stan/base" + "go.uber.org/zap" +) + +type SensorSTAN struct { + *stanbase.STAN + sensorName string +} + +func NewSensorSTAN(url, clusterID, sensorName string, auth *eventbuscommon.Auth, logger *zap.SugaredLogger) *SensorSTAN { + return &SensorSTAN{ + stanbase.NewSTAN(url, clusterID, auth, logger), + sensorName, + } +} + +func (n *SensorSTAN) Initialize() error { + return nil +} + +func (n *SensorSTAN) Connect(ctx context.Context, triggerName string, dependencyExpression string, deps []eventbuscommon.Dependency, atLeastOnce bool) (eventbuscommon.TriggerConnection, error) { + // Generate clientID with hash code + hashKey := fmt.Sprintf("%s-%s-%s", n.sensorName, triggerName, dependencyExpression) + randomNum, _ := rand.Int(rand.Reader, big.NewInt(int64(100))) + hashVal := common.Hasher(hashKey) + clientID := fmt.Sprintf("client-%v-%v", hashVal, randomNum.Int64()) + + conn, err := n.MakeConnection(clientID) + if err != nil { + return nil, err + } + + return NewSTANTriggerConn(conn, n.sensorName, triggerName, dependencyExpression, deps), nil +} diff --git a/eventbus/stan/sensor/trigger_conn.go b/eventbus/stan/sensor/trigger_conn.go new file mode 100644 index 0000000000..466e0bc511 --- /dev/null +++ b/eventbus/stan/sensor/trigger_conn.go @@ -0,0 +1,464 @@ +package sensor + +import ( + "context" + "encoding/json" + "fmt" + "strings" + "sync" + "time" + + "github.com/Knetic/govaluate" + cloudevents "github.com/cloudevents/sdk-go/v2" + "github.com/gobwas/glob" + "github.com/nats-io/stan.go" + "github.com/nats-io/stan.go/pb" + "go.uber.org/zap" + + eventbuscommon "github.com/argoproj/argo-events/eventbus/common" + + stanbase "github.com/argoproj/argo-events/eventbus/stan/base" +) + +type STANTriggerConn struct { + *stanbase.STANConnection + + sensorName string + triggerName string + dependencyExpression string + deps []eventbuscommon.Dependency +} + +func NewSTANTriggerConn(conn *stanbase.STANConnection, sensorName string, triggerName string, dependencyExpression string, deps []eventbuscommon.Dependency) *STANTriggerConn { + n := &STANTriggerConn{conn, sensorName, triggerName, dependencyExpression, deps} + n.Logger = n.Logger.With("triggerName", n.triggerName).With("clientID", n.ClientID) + return n +} + +func (n *STANTriggerConn) String() string { + if n == nil { + return "" + } + return fmt.Sprintf("STANTriggerConn{ClientID:%s,Sensor:%s,Trigger:%s}", n.ClientID, n.sensorName, n.triggerName) +} + +func (conn *STANTriggerConn) IsClosed() bool { + return conn == nil || conn.STANConnection.IsClosed() +} + +func (conn *STANTriggerConn) Close() error { + if conn == nil { + return fmt.Errorf("can't close STAN trigger connection, STANTriggerConn is nil") + } + return conn.STANConnection.Close() +} + +// Subscribe is used to subscribe to multiple event source 
dependencies
+// Parameter - ctx, context
+// Parameter - closeCh, channel to indicate to close the subscription
+// Parameter - resetConditionsCh, channel to indicate to reset trigger conditions
+// Parameter - lastResetTime, the last time a reset occurred, if any
+// Parameter - transform, a function used to transform the message
+// Parameter - filter, a function used to filter the message
+// Parameter - action, a function to be triggered after all conditions are met
+// Parameter - defaultSubject, the subject to subscribe to
+// The connection's dependency expression (e.g. "(dep1 || dep2) && dep3") and
+// dependencies are taken from the STANTriggerConn itself.
+func (n *STANTriggerConn) Subscribe(
+	ctx context.Context,
+	closeCh <-chan struct{},
+	resetConditionsCh <-chan struct{},
+	lastResetTime time.Time,
+	transform func(depName string, event cloudevents.Event) (*cloudevents.Event, error),
+	filter func(string, cloudevents.Event) bool,
+	action func(map[string]cloudevents.Event),
+	defaultSubject *string) error {
+	if n == nil {
+		return fmt.Errorf("Subscribe() failed; STANTriggerConn is nil")
+	}
+
+	log := n.Logger
+
+	// defaultSubject is dereferenced below, so fail fast if it's not set
+	if defaultSubject == nil {
+		return fmt.Errorf("can't subscribe over NATS streaming: defaultSubject not set")
+	}
+
+	msgHolder, err := newEventSourceMessageHolder(log, n.dependencyExpression, n.deps, lastResetTime)
+	if err != nil {
+		return err
+	}
+	// use the group name as the durable name
+	group, err := n.getGroupNameFromClientID(n.ClientID)
+	if err != nil {
+		return err
+	}
+	durableName := group
+	sub, err := n.STANConn.QueueSubscribe(*defaultSubject, group, func(m *stan.Msg) {
+		n.processEventSourceMsg(m, msgHolder, transform, filter, action, log)
+	}, stan.DurableName(durableName),
+		stan.SetManualAckMode(),
+		stan.StartAt(pb.StartPosition_NewOnly),
+		stan.AckWait(1*time.Second),
+		stan.MaxInflight(len(msgHolder.depNames)+2))
+	if err != nil {
+		log.Errorf("failed to subscribe to subject %s", *defaultSubject)
+		return err
+	}
+	log.Infof("Subscribed to subject %s using durable name %s", *defaultSubject, durableName)
+
+	// Daemon to evict cache and reset trigger conditions
+	wg := &sync.WaitGroup{}
+	daemonStopCh := make(chan struct{})
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		log.Info("starting ExactOnce cache clean up daemon ...")
+		ticker := time.NewTicker(60 * time.Second)
+		defer ticker.Stop()
+		for {
+			select {
+			case <-daemonStopCh:
+				log.Info("exiting ExactOnce cache clean up daemon...")
+				return
+			case <-ticker.C:
+				now := time.Now().UnixNano()
+				num := 0
+				msgHolder.smap.Range(func(key, value interface{}) bool {
+					v := value.(int64)
+					// Evict cached IDs older than 5 minutes
+					if now-v > 5*60*1000*1000*1000 {
+						msgHolder.smap.Delete(key)
+						num++
+						log.Debugw("cached ID evicted", "id", key)
+					}
+					return true
+				})
+				log.Debugf("finished evicting %v cached IDs, time cost: %v ms", num, (time.Now().UnixNano()-now)/1000/1000)
+			case <-resetConditionsCh:
+				log.Info("reset conditions")
+				msgHolder.setLastResetTime(time.Now())
+			}
+		}
+	}()
+
+	for {
+		select {
+		case <-ctx.Done():
+			log.Info("exiting, unsubscribing and closing connection...")
+			_ = sub.Close()
+			log.Infof("subscription on subject %s closed", *defaultSubject)
+			daemonStopCh <- struct{}{}
+			wg.Wait()
+			return nil
+		case <-closeCh:
+			log.Info("closing subscription...")
+			_ = sub.Close()
+			log.Infof("subscription on subject %s closed", *defaultSubject)
+			daemonStopCh <- struct{}{}
+			wg.Wait()
+			return nil
+		}
+	}
+}
+
+ +func (n *STANTriggerConn) processEventSourceMsg(m *stan.Msg, msgHolder *eventSourceMessageHolder, transform func(depName string, event cloudevents.Event) (*cloudevents.Event, error), filter func(dependencyName string, event cloudevents.Event) bool, action func(map[string]cloudevents.Event), log *zap.SugaredLogger) { + var event *cloudevents.Event + if err := json.Unmarshal(m.Data, &event); err != nil { + log.Errorf("Failed to convert to a cloudevent, discarding it... err: %v", err) + _ = m.Ack() + return + } + + depName, err := msgHolder.getDependencyName(event.Source(), event.Subject()) + if err != nil { + log.Errorf("Failed to get the dependency name, discarding it... err: %v", err) + _ = m.Ack() + return + } + + log.Debugf("New incoming Event Source Message, dependency name=%s", depName) + + if depName == "" { + _ = m.Ack() + return + } + + event, err = transform(depName, *event) + if err != nil { + log.Errorw("failed to apply event transformation", zap.Error(err)) + _ = m.Ack() + return + } + + if !filter(depName, *event) { + // not interested in this message + log.Debugf("not interested in dependency %s", depName) + _ = m.Ack() + return + } + + // NATS Streaming guarantees at-least-once delivery, + // so we need to check whether the message is a duplicate + if _, ok := msgHolder.smap.Load(event.ID()); ok { + log.Infow("ATTENTION: Duplicate delivered message detected", "message", m) + _ = m.Ack() + return + } + + // Acknowledge any old messages that occurred before the last reset (standard reset after trigger or conditional reset) + if m.Timestamp <= msgHolder.getLastResetTime().UnixNano() { + if depName != "" { + msgHolder.reset(depName) + } + msgHolder.ackAndCache(m, event.ID()) + + log.Debugf("reset and acked dependency=%s because the message occurred before the last reset, m.Timestamp=%d, msgHolder.getLastResetTime()=%d", + depName, m.Timestamp, msgHolder.getLastResetTime().UnixNano()) + return + } + // make sure that everything has been cleared within a certain amount of time + if msgHolder.fullResetTimeout() { + log.Infof("ATTENTION: Resetting the flags because they didn't get cleared before the timeout: msgHolder=%+v", msgHolder) + msgHolder.resetAll() + } + + now := time.Now().Unix() + + // Start a new round + if existingMsg, ok := msgHolder.msgs[depName]; ok { + if m.Timestamp == existingMsg.timestamp { + // Re-delivered latest message, update the delivery timestamp and return + existingMsg.lastDeliveredTime = now + msgHolder.msgs[depName] = existingMsg + log.Debugf("Updating timestamp for dependency=%s", depName) + return + } else if m.Timestamp < existingMsg.timestamp { + // Re-delivered old message, ack and return + msgHolder.ackAndCache(m, event.ID()) + log.Debugw("Dropping this message because a later one already satisfies the dependency", "eventID", event.ID()) + return + } + } + // New message, set and check + msgHolder.msgs[depName] = &eventSourceMessage{seq: m.Sequence, timestamp: m.Timestamp, event: event, lastDeliveredTime: now} + msgHolder.parameters[depName] = true + + // Check if there's any stale message being held. + // A message is stale when its age exceeds the NATS streaming max message age, + // which means it has been deleted on the NATS server side but is still held here. + // Use the last delivery timestamp to determine that. + for k, v := range msgHolder.msgs { + // Since the message is not acked, the server will keep re-sending it. + // If a message being held didn't get re-delivered in the last 10 minutes, treat it as stale.
+ if (now - v.lastDeliveredTime) > 10*60 { + msgHolder.reset(k) + } + } + + result, err := msgHolder.expr.Evaluate(msgHolder.parameters) + if err != nil { + log.Errorf("failed to evaluate dependency expression: %v", err) + // TODO: how to handle this situation? + return + } + if result != true { + // Log the dependencies that have been met so far + meetDeps := []string{} + meetMsgIds := []string{} + for k, v := range msgHolder.msgs { + meetDeps = append(meetDeps, k) + meetMsgIds = append(meetMsgIds, v.event.ID()) + } + log.Infow("trigger conditions not met", zap.Any("meetDependencies", meetDeps), zap.Any("meetEvents", meetMsgIds)) + return + } + + msgHolder.setLastResetTime(time.Unix(m.Timestamp/1e9, m.Timestamp%1e9)) + // Trigger actions + messages := make(map[string]cloudevents.Event) + for k, v := range msgHolder.msgs { + messages[k] = *v.event + } + log.Debugf("Triggering actions for client %s", n.ClientID) + + action(messages) + + msgHolder.reset(depName) + msgHolder.ackAndCache(m, event.ID()) +} + +func (n *STANTriggerConn) getGroupNameFromClientID(clientID string) (string, error) { + log := n.Logger.With("clientID", n.ClientID) + // strip the last segment: the client ID always ends with a dash followed by a random suffix + strs := strings.Split(clientID, "-") + if len(strs) < 2 { + err := fmt.Errorf("expected client ID to contain a dash: %s", clientID) + log.Error(err) + return "", err + } + return strings.Join(strs[:len(strs)-1], "-"), nil +} + +// eventSourceMessage is used by messageHolder to hold the latest message +type eventSourceMessage struct { + seq uint64 + timestamp int64 + event *cloudevents.Event + // timestamp of the last delivery + lastDeliveredTime int64 +}
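Editor's note: the evaluation step above and the holder construction below reduce to one govaluate pattern: escape dashes in the expression (govaluate would otherwise parse `dep-1` as subtraction), collect the variables, seed them all to false, and re-evaluate as incoming events flip them to true. A minimal sketch with an illustrative expression:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/Knetic/govaluate"
)

func main() {
	// Escape dashes exactly as newEventSourceMessageHolder does.
	exprStr := strings.ReplaceAll("(dep-1 || dep-2) && dep-3", "-", "\\-")
	expression, err := govaluate.NewEvaluableExpression(exprStr)
	if err != nil {
		panic(err)
	}

	// Every dependency starts out false, like msgHolder.parameters.
	parameters := map[string]interface{}{}
	for _, v := range expression.Vars() {
		parameters[v] = false
	}

	// Simulate events arriving for dep-1 and dep-3.
	parameters["dep-1"] = true
	parameters["dep-3"] = true

	result, err := expression.Evaluate(parameters)
	if err != nil {
		panic(err)
	}
	fmt.Println(result) // false: dep-2 has not arrived, so the trigger would not fire yet
}
```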
+ +// eventSourceMessageHolder is a struct used to hold the message information of subscribed dependencies +type eventSourceMessageHolder struct { + // time that resets conditions, usually the time all conditions are met, + // or the time an external reset signal is received. + lastResetTime time.Time + // if we reach this time, we reset everything (occurs 60 seconds after lastResetTime) + resetTimeout int64 + expr *govaluate.EvaluableExpression + depNames []string + // Mapping of [eventSourceName + eventName]dependencyName + sourceDepMap map[string]string + parameters map[string]interface{} + msgs map[string]*eventSourceMessage + // A sync map used to cache the message IDs; it is used to guarantee exactly-once triggering + smap *sync.Map + lock sync.RWMutex + timeoutLock sync.RWMutex + + logger *zap.SugaredLogger +} + +func newEventSourceMessageHolder(logger *zap.SugaredLogger, dependencyExpr string, dependencies []eventbuscommon.Dependency, lastResetTime time.Time) (*eventSourceMessageHolder, error) { + dependencyExpr = strings.ReplaceAll(dependencyExpr, "-", "\\-") + expression, err := govaluate.NewEvaluableExpression(dependencyExpr) + if err != nil { + return nil, err + } + deps := unique(expression.Vars()) + if len(deps) == 0 { + return nil, fmt.Errorf("no dependencies found: %s", dependencyExpr) + } + + srcDepMap := make(map[string]string) + for _, d := range dependencies { + key := d.EventSourceName + "__" + d.EventName + srcDepMap[key] = d.Name + } + + parameters := make(map[string]interface{}, len(deps)) + msgs := make(map[string]*eventSourceMessage) + for _, dep := range deps { + parameters[dep] = false + } + + return &eventSourceMessageHolder{ + lastResetTime: lastResetTime, + expr: expression, + depNames: deps, + sourceDepMap: srcDepMap, + parameters: parameters, + msgs: msgs, + smap: new(sync.Map), + lock: sync.RWMutex{}, + logger: logger, + }, nil +} + +func (mh *eventSourceMessageHolder) getLastResetTime() time.Time { + mh.lock.RLock() + defer mh.lock.RUnlock() + return mh.lastResetTime +} + +func (mh *eventSourceMessageHolder) setLastResetTime(t time.Time) { + { + mh.lock.Lock() // since this can be called asynchronously as part of a ConditionReset, we need to lock this code + defer mh.lock.Unlock() + mh.lastResetTime = t + } + mh.setResetTimeout(t.Add(time.Second * 60).Unix()) // failsafe: catch the case where, for some reason, not all dependencies get acknowledged within 60 seconds of the lastResetTime +} + +func (mh *eventSourceMessageHolder) setResetTimeout(t int64) { + mh.timeoutLock.Lock() // since this can be called asynchronously as part of a ConditionReset, we need to lock this code + defer mh.timeoutLock.Unlock() + mh.resetTimeout = t +} + +func (mh *eventSourceMessageHolder) getResetTimeout() int64 { + mh.timeoutLock.RLock() + defer mh.timeoutLock.RUnlock() + return mh.resetTimeout +} + +// failsafe condition after lastResetTime +func (mh *eventSourceMessageHolder) fullResetTimeout() bool { + resetTimeout := mh.getResetTimeout() + return resetTimeout != 0 && time.Now().Unix() > resetTimeout +} + +func (mh *eventSourceMessageHolder) getDependencyName(eventSourceName, eventName string) (string, error) { + for k, v := range mh.sourceDepMap { + sourceGlob, err := glob.Compile(k) + if err != nil { + return "", err + } + if sourceGlob.Match(eventSourceName + "__" + eventName) { + return v, nil + } + } + return "", nil +} + +// Ack the stan message and cache the ID to guarantee exactly-once triggering +func (mh *eventSourceMessageHolder) ackAndCache(m *stan.Msg, id string) { + _ = m.Ack() + mh.smap.Store(id, time.Now().UnixNano()) +}
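Editor's note: getDependencyName above resolves an incoming event to its dependency by glob-matching the event's `source__subject` against each `eventSourceName__eventName` key, which is what allows dependencies to reference event sources by wildcard. A tiny illustration with gobwas/glob (the patterns and names are made up):

```go
package main

import (
	"fmt"

	"github.com/gobwas/glob"
)

func main() {
	// A dependency declared with a wildcard event source name, keyed the
	// same way sourceDepMap keys are built: eventSourceName + "__" + eventName.
	g, err := glob.Compile("webhook-*__example")
	if err != nil {
		panic(err)
	}

	fmt.Println(g.Match("webhook-prod__example")) // true: resolves to this dependency
	fmt.Println(g.Match("calendar__example"))     // false: keep scanning the map
}
```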
+ +// Reset the parameter and message that a dependency holds +func (mh *eventSourceMessageHolder) reset(depName string) { + mh.parameters[depName] = false + delete(mh.msgs, depName) + + if mh.isCleanedUp() { + mh.setResetTimeout(0) + } +} + +func (mh *eventSourceMessageHolder) resetAll() { + for k := range mh.msgs { + delete(mh.msgs, k) + } + + for k := range mh.parameters { + mh.parameters[k] = false + } + mh.setResetTimeout(0) +} + +// Check if all the parameters and messages have been cleaned up +func (mh *eventSourceMessageHolder) isCleanedUp() bool { + for _, v := range mh.parameters { + if v == true { + return false + } + } + return len(mh.msgs) == 0 +} + +func unique(stringSlice []string) []string { + if len(stringSlice) == 0 { + return stringSlice + } + keys := make(map[string]bool) + list := []string{} + for _, entry := range stringSlice { + if _, value := keys[entry]; !value { + keys[entry] = true + list = append(list, entry) + } + } + return list +} diff --git a/eventsources/cmd/start.go b/eventsources/cmd/start.go index d73302c8ae..0f5b2d9a84 100644 --- a/eventsources/cmd/start.go +++ b/eventsources/cmd/start.go @@ -10,6 +10,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager/signals" argoevents "github.com/argoproj/argo-events" + "github.com/argoproj/argo-events/codefresh" "github.com/argoproj/argo-events/common" "github.com/argoproj/argo-events/common/logging" "github.com/argoproj/argo-events/eventsources" @@ -60,8 +61,13 @@ func Start() { m := metrics.NewMetrics(eventSource.Namespace) go m.Run(ctx, fmt.Sprintf(":%d", common.EventSourceMetricsPort)) + cfClient, err := codefresh.NewClient(ctx, eventSource.Namespace) + if err != nil { + logger.Fatalw("unable to initialise Codefresh Client", zap.Error(err)) + } + logger.Infow("starting eventsource server", "version", argoevents.GetVersion()) - adaptor := eventsources.NewEventSourceAdaptor(eventSource, busConfig, ebSubject, hostname, m) + adaptor := eventsources.NewEventSourceAdaptor(eventSource, busConfig, ebSubject, hostname, m, cfClient) if err := adaptor.Start(ctx); err != nil { logger.Fatalw("failed to start eventsource server", zap.Error(err)) }
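Editor's note: the aws.go hunks below thread an optional session-token selector through volume-based credential loading and into CreateAWSSessionWithCredsInVolume. A sketch of a call under the new signature; the secret names and keys are illustrative, and only the signatures shown in this diff are assumed. Pass nil for the token when only static keys are used, as the SNS PostActivate change later in this diff does.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"

	commonaws "github.com/argoproj/argo-events/eventsources/common/aws"
)

func main() {
	sel := func(secretName, key string) *corev1.SecretKeySelector {
		return &corev1.SecretKeySelector{
			LocalObjectReference: corev1.LocalObjectReference{Name: secretName},
			Key:                  key,
		}
	}

	// Temporary STS credentials need all three selectors; for long-lived
	// keys the session token argument can simply be nil.
	sess, err := commonaws.CreateAWSSessionWithCredsInVolume(
		"us-east-1", // region (illustrative)
		"",          // empty roleARN: use the static credentials below
		sel("aws-creds", "accesskey"),
		sel("aws-creds", "secretkey"),
		sel("aws-creds", "sessiontoken"),
	)
	if err != nil {
		panic(err)
	}
	fmt.Println("session created:", sess != nil)
}
```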
diff --git a/eventsources/common/aws/aws.go b/eventsources/common/aws/aws.go index 607001ac7a..6441b5fca9 100644 --- a/eventsources/common/aws/aws.go +++ b/eventsources/common/aws/aws.go @@ -17,11 +17,12 @@ limitations under the License. package aws import ( + "fmt" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/credentials/stscreds" "github.com/aws/aws-sdk-go/aws/session" - "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" "github.com/argoproj/argo-events/common" @@ -31,11 +32,11 @@ import ( func GetAWSCredFromEnvironment(access *corev1.SecretKeySelector, secret *corev1.SecretKeySelector) (*credentials.Credentials, error) { accessKey, ok := common.GetEnvFromSecret(access) if !ok { - return nil, errors.Errorf("can not find envFrom %v", access) + return nil, fmt.Errorf("can not find envFrom %v", access) } secretKey, ok := common.GetEnvFromSecret(secret) if !ok { - return nil, errors.Errorf("can not find envFrom %v", secret) + return nil, fmt.Errorf("can not find envFrom %v", secret) } return credentials.NewStaticCredentialsFromCreds(credentials.Value{ AccessKeyID: accessKey, @@ -44,18 +45,28 @@ func GetAWSCredFromEnvironment(access *corev1.SecretKeySelector, secret *corev1. } // GetAWSCredFromVolume reads credential stored in mounted secret volume. -func GetAWSCredFromVolume(access *corev1.SecretKeySelector, secret *corev1.SecretKeySelector) (*credentials.Credentials, error) { +func GetAWSCredFromVolume(access *corev1.SecretKeySelector, secret *corev1.SecretKeySelector, sessionToken *corev1.SecretKeySelector) (*credentials.Credentials, error) { accessKey, err := common.GetSecretFromVolume(access) if err != nil { - return nil, errors.Wrap(err, "can not find access key") + return nil, fmt.Errorf("can not find access key, %w", err) } secretKey, err := common.GetSecretFromVolume(secret) if err != nil { - return nil, errors.Wrap(err, "can not find secret key") + return nil, fmt.Errorf("can not find secret key, %w", err) + } + + var token string + if sessionToken != nil { + token, err = common.GetSecretFromVolume(sessionToken) + if err != nil { + return nil, fmt.Errorf("can not find session token, %w", err) + } } + return credentials.NewStaticCredentialsFromCreds(credentials.Value{ AccessKeyID: accessKey, SecretAccessKey: secretKey, + SessionToken: token, }), nil } @@ -97,7 +108,7 @@ func CreateAWSSessionWithCredsInEnv(region string, roleARN string, accessKey *co } // CreateAWSSessionWithCredsInVolume based on credentials in mounted volumes, return a aws session -func CreateAWSSessionWithCredsInVolume(region string, roleARN string, accessKey *corev1.SecretKeySelector, secretKey *corev1.SecretKeySelector) (*session.Session, error) { +func CreateAWSSessionWithCredsInVolume(region string, roleARN string, accessKey *corev1.SecretKeySelector, secretKey *corev1.SecretKeySelector, sessionToken *corev1.SecretKeySelector) (*session.Session, error) { if roleARN != "" { return GetAWSAssumeRoleCreds(roleARN, region) } @@ -106,7 +117,7 @@ func CreateAWSSessionWithCredsInVolume(region string, roleARN string, accessKey return GetAWSSessionWithoutCreds(region) } - creds, err := GetAWSCredFromVolume(accessKey, secretKey) + creds, err := GetAWSCredFromVolume(accessKey, secretKey, sessionToken) if err != nil { return nil, err } diff --git a/eventsources/common/common.go b/eventsources/common/common.go new file mode 100644 index 0000000000..cf2b08bc4e --- /dev/null +++ b/eventsources/common/common.go @@ -0,0 +1,13 @@ +package common + +import "github.com/cloudevents/sdk-go/v2/event" + +type Option func(*event.Event) error + +// WithID returns an Option that sets a different ID on the event +func WithID(id string) Option { + return func(e *event.Event) error { + e.SetID(id) + return nil + } +} diff --git a/eventsources/common/fsevent/fileevent_test.go b/eventsources/common/fsevent/fileevent_test.go new file mode 100644 index 0000000000..e38aca8556 --- /dev/null +++ b/eventsources/common/fsevent/fileevent_test.go @@ -0,0 +1,57 @@ +package fsevent + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestOpString(t *testing.T) { + tests := []struct { + op Op + expected string + }{ + {Create, "CREATE"}, + {Remove, "REMOVE"}, + {Write, "WRITE"}, + {Rename, "RENAME"}, + {Chmod, "CHMOD"}, + {Create | Write, "CREATE|WRITE"}, + } + + for _, tt := range tests { + assert.Equal(t, tt.expected, tt.op.String(), "Op.String() for op %d", tt.op) + } +} + +func TestNewOp(t *testing.T) { + tests := []struct { + input string + expected Op + }{ + {"CREATE", Create}, + {"REMOVE", Remove}, + {"WRITE", Write}, + {"RENAME", Rename}, + {"CHMOD", Chmod}, + {"CREATE|WRITE", Create | Write}, + } + + for _, tt := range tests { + assert.Equal(t, tt.expected, NewOp(tt.input), "NewOp(%q)", tt.input) + } +} + +func TestEventString(t *testing.T) { + tests := []struct { + event Event + expected
string + }{ + {Event{Name: "file1", Op: Create}, `"file1": CREATE`}, + {Event{Name: "file2", Op: Remove | Write}, `"file2": REMOVE|WRITE`}, + } + + for _, tt := range tests { + assert.Equal(t, tt.expected, tt.event.String(), "Event.String() for event %#v", tt.event) + } +} diff --git a/eventsources/common/naivewatcher/watcher.go b/eventsources/common/naivewatcher/watcher.go index 2ef46bf1dd..dde1554fa3 100644 --- a/eventsources/common/naivewatcher/watcher.go +++ b/eventsources/common/naivewatcher/watcher.go @@ -1,7 +1,7 @@ package naivewatcher import ( - "errors" + "fmt" "os" "path/filepath" "sync" @@ -99,7 +99,7 @@ func (w *Watcher) Close() error { // Start starts the watcher func (w *Watcher) Start(interval time.Duration) error { if !w.mutexRunning.TryLock() { - return errors.New("watcher has already started") + return fmt.Errorf("watcher has already started") } // run initial check err := w.Check() @@ -127,7 +127,7 @@ func (w *Watcher) Start(interval time.Duration) error { // Stop stops the watcher func (w *Watcher) Stop() error { if !w.mutexRunning.IsLocked() { - return errors.New("watcher is not started") + return fmt.Errorf("watcher is not started") } select { case <-w.stop: @@ -141,7 +141,7 @@ func (w *Watcher) Stop() error { // Check checks the state of target directories func (w *Watcher) Check() error { if !w.mCheck.TryLock() { - return errors.New("another check is still running") + return fmt.Errorf("another check is still running") } defer w.mCheck.Unlock() diff --git a/eventsources/common/naivewatcher/watcher_test.go b/eventsources/common/naivewatcher/watcher_test.go index d05ea4b25e..08b4303437 100644 --- a/eventsources/common/naivewatcher/watcher_test.go +++ b/eventsources/common/naivewatcher/watcher_test.go @@ -2,16 +2,14 @@ package naivewatcher import ( "fmt" - "io/ioutil" "os" "path/filepath" "syscall" "testing" "time" - "github.com/stretchr/testify/assert" - "github.com/argoproj/argo-events/eventsources/common/fsevent" + "github.com/stretchr/testify/assert" ) type WatchableTestFS struct { @@ -41,7 +39,7 @@ func TestWatcherAutoCheck(t *testing.T) { } defer watcher.Close() - tmpdir, err := ioutil.TempDir("", "naive-watcher-") + tmpdir, err := os.MkdirTemp("", "naive-watcher-") if err != nil { t.Fatal(err) } @@ -52,7 +50,7 @@ func TestWatcherAutoCheck(t *testing.T) { t.Fatal(err) } - err = watcher.Start(100 * time.Millisecond) + err = watcher.Start(50 * time.Millisecond) if err != nil { t.Fatal(err) } @@ -67,7 +65,7 @@ func TestWatcherAutoCheck(t *testing.T) { if err != nil { t.Fatal(err) } - time.Sleep(200 * time.Millisecond) + time.Sleep(300 * time.Millisecond) events := readEvents(t, watcher) assert.Equal(t, []fsevent.Event{ {Op: fsevent.Create, Name: filepath.Join(tmpdir, "foo")}, @@ -78,18 +76,18 @@ func TestWatcherAutoCheck(t *testing.T) { if err != nil { t.Fatal(err) } - time.Sleep(200 * time.Millisecond) + time.Sleep(300 * time.Millisecond) events = readEvents(t, watcher) assert.Equal(t, []fsevent.Event{ {Op: fsevent.Rename, Name: filepath.Join(tmpdir, "bar")}, }, events) // Write a file - err = ioutil.WriteFile(filepath.Join(tmpdir, "bar"), []byte("wow"), 0666) + err = os.WriteFile(filepath.Join(tmpdir, "bar"), []byte("wow"), 0666) if err != nil { t.Fatal(err) } - time.Sleep(200 * time.Millisecond) + time.Sleep(300 * time.Millisecond) events = readEvents(t, watcher) assert.Equal(t, []fsevent.Event{ {Op: fsevent.Write, Name: filepath.Join(tmpdir, "bar")}, @@ -100,7 +98,7 @@ func TestWatcherAutoCheck(t *testing.T) { if err != nil { t.Fatal(err) } - time.Sleep(200 * 
time.Millisecond) + time.Sleep(300 * time.Millisecond) events = readEvents(t, watcher) assert.Equal(t, []fsevent.Event{ {Op: fsevent.Chmod, Name: filepath.Join(tmpdir, "bar")}, @@ -111,7 +109,7 @@ func TestWatcherAutoCheck(t *testing.T) { if err != nil { t.Fatal(err) } - err = ioutil.WriteFile(filepath.Join(tmpdir, "foo"), []byte("wowwow"), 0666) + err = os.WriteFile(filepath.Join(tmpdir, "foo"), []byte("wowwow"), 0666) if err != nil { t.Fatal(err) } @@ -120,7 +118,7 @@ func TestWatcherAutoCheck(t *testing.T) { t.Fatal(err) } var actualOps fsevent.Op - time.Sleep(200 * time.Millisecond) + time.Sleep(300 * time.Millisecond) events = readEvents(t, watcher) for _, event := range events { if event.Name == filepath.Join(tmpdir, "foo") { @@ -134,7 +132,7 @@ func TestWatcherAutoCheck(t *testing.T) { if err != nil { t.Fatal(err) } - time.Sleep(200 * time.Millisecond) + time.Sleep(300 * time.Millisecond) events = readEvents(t, watcher) assert.Equal(t, []fsevent.Event{ {Op: fsevent.Remove, Name: filepath.Join(tmpdir, "foo")}, @@ -158,7 +156,7 @@ func TestWatcherManualCheck(t *testing.T) { } defer watcher.Close() - tmpdir, err := ioutil.TempDir("", "naive-watcher-") + tmpdir, err := os.MkdirTemp("", "naive-watcher-") if err != nil { t.Fatal(err) } @@ -193,7 +191,7 @@ func TestWatcherManualCheck(t *testing.T) { }, events) // Write a file - err = ioutil.WriteFile(filepath.Join(tmpdir, "bar"), []byte("wow"), 0666) + err = os.WriteFile(filepath.Join(tmpdir, "bar"), []byte("wow"), 0666) if err != nil { t.Fatal(err) } @@ -217,7 +215,7 @@ func TestWatcherManualCheck(t *testing.T) { if err != nil { t.Fatal(err) } - err = ioutil.WriteFile(filepath.Join(tmpdir, "foo"), []byte("wowwow"), 0666) + err = os.WriteFile(filepath.Join(tmpdir, "foo"), []byte("wowwow"), 0666) if err != nil { t.Fatal(err) } diff --git a/eventsources/common/webhook/webhook.go b/eventsources/common/webhook/webhook.go index 9541f98f00..9dc7c1c87a 100644 --- a/eventsources/common/webhook/webhook.go +++ b/eventsources/common/webhook/webhook.go @@ -27,6 +27,7 @@ import ( "github.com/argoproj/argo-events/common" "github.com/argoproj/argo-events/common/logging" + eventsourcecommon "github.com/argoproj/argo-events/eventsources/common" metrics "github.com/argoproj/argo-events/metrics" "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" ) @@ -104,13 +105,6 @@ func startServer(router Router, controller *Controller) { if err != nil { route.Logger.With("port", route.Context.Port).Errorw("failed to listen and serve with TLS configured", zap.Error(err)) } - case route.Context.DeprecatedServerCertPath != "" && route.Context.DeprecatedServerKeyPath != "": - // DEPRECATED. 
- route.Logger.Warn("ServerCertPath and ServerKeyPath are deprecated, please use ServerCertSecret and ServerKeySecret") - err := server.ListenAndServeTLS(route.Context.DeprecatedServerCertPath, route.Context.DeprecatedServerKeyPath) - if err != nil { - route.Logger.With("port", route.Context.Port).Errorw("failed to listen and serve with TLS configured", zap.Error(err)) - } default: err := server.ListenAndServe() if err != nil { @@ -147,6 +141,10 @@ func startServer(router Router, controller *Controller) { return } } + if request.Header.Get("Authorization") != "" { + // Auth secret stops here + request.Header.Set("Authorization", "*** Masked Auth Secret ***") + } router.HandleRoute(writer, request) }) } @@ -180,7 +178,7 @@ func activateRoute(router Router, controller *Controller) { } // manageRouteChannels consumes data from route's data channel and stops the processing when the event source is stopped/removed -func manageRouteChannels(router Router, dispatch func([]byte) error) { +func manageRouteChannels(router Router, dispatch func([]byte, ...eventsourcecommon.Option) error) { route := router.GetRoute() logger := route.Logger for { @@ -201,7 +199,7 @@ func manageRouteChannels(router Router, dispatch func([]byte) error) { } // ManagerRoute manages the lifecycle of a route -func ManageRoute(ctx context.Context, router Router, controller *Controller, dispatch func([]byte) error) error { +func ManageRoute(ctx context.Context, router Router, controller *Controller, dispatch func([]byte, ...eventsourcecommon.Option) error) error { route := router.GetRoute() logger := route.Logger diff --git a/eventsources/eventing.go b/eventsources/eventing.go index a92651591c..8a9687707f 100644 --- a/eventsources/eventing.go +++ b/eventsources/eventing.go @@ -2,33 +2,40 @@ package eventsources import ( "context" + "crypto/rand" "encoding/json" "fmt" - "math/rand" + "math/big" "strings" "sync" "time" cloudevents "github.com/cloudevents/sdk-go/v2" "github.com/google/uuid" - "github.com/pkg/errors" "go.uber.org/zap" "github.com/argoproj/argo-events/codefresh" "github.com/argoproj/argo-events/common" + "github.com/argoproj/argo-events/common/expr" "github.com/argoproj/argo-events/common/leaderelection" "github.com/argoproj/argo-events/common/logging" "github.com/argoproj/argo-events/eventbus" - eventbusdriver "github.com/argoproj/argo-events/eventbus/driver" + eventbuscommon "github.com/argoproj/argo-events/eventbus/common" + eventsourcecommon "github.com/argoproj/argo-events/eventsources/common" "github.com/argoproj/argo-events/eventsources/sources/amqp" "github.com/argoproj/argo-events/eventsources/sources/awssns" "github.com/argoproj/argo-events/eventsources/sources/awssqs" "github.com/argoproj/argo-events/eventsources/sources/azureeventshub" + "github.com/argoproj/argo-events/eventsources/sources/azurequeuestorage" + "github.com/argoproj/argo-events/eventsources/sources/azureservicebus" + "github.com/argoproj/argo-events/eventsources/sources/bitbucket" + "github.com/argoproj/argo-events/eventsources/sources/bitbucketserver" "github.com/argoproj/argo-events/eventsources/sources/calendar" "github.com/argoproj/argo-events/eventsources/sources/emitter" "github.com/argoproj/argo-events/eventsources/sources/file" "github.com/argoproj/argo-events/eventsources/sources/gcppubsub" "github.com/argoproj/argo-events/eventsources/sources/generic" + "github.com/argoproj/argo-events/eventsources/sources/gerrit" "github.com/argoproj/argo-events/eventsources/sources/github" 
"github.com/argoproj/argo-events/eventsources/sources/gitlab" "github.com/argoproj/argo-events/eventsources/sources/hdfs" @@ -39,7 +46,9 @@ import ( "github.com/argoproj/argo-events/eventsources/sources/nsq" "github.com/argoproj/argo-events/eventsources/sources/pulsar" "github.com/argoproj/argo-events/eventsources/sources/redis" + redisstream "github.com/argoproj/argo-events/eventsources/sources/redis_stream" "github.com/argoproj/argo-events/eventsources/sources/resource" + "github.com/argoproj/argo-events/eventsources/sources/sftp" "github.com/argoproj/argo-events/eventsources/sources/slack" "github.com/argoproj/argo-events/eventsources/sources/storagegrid" "github.com/argoproj/argo-events/eventsources/sources/stripe" @@ -48,6 +57,7 @@ import ( apicommon "github.com/argoproj/argo-events/pkg/apis/common" eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" + "github.com/pkg/errors" ) // EventingServer is the server API for Eventing service. @@ -63,15 +73,19 @@ type EventingServer interface { GetEventSourceType() apicommon.EventSourceType // Function to start listening events. - StartListening(ctx context.Context, dispatch func([]byte) error) error + StartListening(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error) error } // GetEventingServers returns the mapping of event source type and list of eventing servers -func GetEventingServers(eventSource *v1alpha1.EventSource, metrics *eventsourcemetrics.Metrics) map[apicommon.EventSourceType][]EventingServer { +func GetEventingServers(eventSource *v1alpha1.EventSource, metrics *eventsourcemetrics.Metrics) (map[apicommon.EventSourceType][]EventingServer, map[string]*v1alpha1.EventSourceFilter) { result := make(map[apicommon.EventSourceType][]EventingServer) + filters := make(map[string]*v1alpha1.EventSourceFilter) if len(eventSource.Spec.AMQP) != 0 { servers := []EventingServer{} for k, v := range eventSource.Spec.AMQP { + if v.Filter != nil { + filters[k] = v.Filter + } servers = append(servers, &amqp.EventListener{EventSourceName: eventSource.Name, EventName: k, AMQPEventSource: v, Metrics: metrics}) } result[apicommon.AMQPEvent] = servers @@ -79,13 +93,59 @@ func GetEventingServers(eventSource *v1alpha1.EventSource, metrics *eventsourcem if len(eventSource.Spec.AzureEventsHub) != 0 { servers := []EventingServer{} for k, v := range eventSource.Spec.AzureEventsHub { + if v.Filter != nil { + filters[k] = v.Filter + } servers = append(servers, &azureeventshub.EventListener{EventSourceName: eventSource.Name, EventName: k, AzureEventsHubEventSource: v, Metrics: metrics}) } result[apicommon.AzureEventsHub] = servers } + if len(eventSource.Spec.AzureQueueStorage) != 0 { + servers := []EventingServer{} + for k, v := range eventSource.Spec.AzureQueueStorage { + if v.Filter != nil { + filters[k] = v.Filter + } + servers = append(servers, &azurequeuestorage.EventListener{EventSourceName: eventSource.Name, EventName: k, AzureQueueStorageEventSource: v, Metrics: metrics}) + } + result[apicommon.AzureQueueStorage] = servers + } + if len(eventSource.Spec.AzureServiceBus) != 0 { + servers := []EventingServer{} + for k, v := range eventSource.Spec.AzureServiceBus { + if v.Filter != nil { + filters[k] = v.Filter + } + servers = append(servers, &azureservicebus.EventListener{EventSourceName: eventSource.Name, EventName: k, AzureServiceBusEventSource: v, Metrics: metrics}) + } + result[apicommon.AzureServiceBus] = servers + } + if 
len(eventSource.Spec.Bitbucket) != 0 { + servers := []EventingServer{} + for k, v := range eventSource.Spec.Bitbucket { + if v.Filter != nil { + filters[k] = v.Filter + } + servers = append(servers, &bitbucket.EventListener{EventSourceName: eventSource.Name, EventName: k, BitbucketEventSource: v, Metrics: metrics}) + } + result[apicommon.BitbucketEvent] = servers + } + if len(eventSource.Spec.BitbucketServer) != 0 { + servers := []EventingServer{} + for k, v := range eventSource.Spec.BitbucketServer { + if v.Filter != nil { + filters[k] = v.Filter + } + servers = append(servers, &bitbucketserver.EventListener{EventSourceName: eventSource.Name, EventName: k, BitbucketServerEventSource: v, Metrics: metrics}) + } + result[apicommon.BitbucketServerEvent] = servers + } if len(eventSource.Spec.Calendar) != 0 { servers := []EventingServer{} for k, v := range eventSource.Spec.Calendar { + if v.Filter != nil { + filters[k] = v.Filter + } servers = append(servers, &calendar.EventListener{EventSourceName: eventSource.Name, EventName: k, CalendarEventSource: v, Namespace: eventSource.Namespace, Metrics: metrics}) } result[apicommon.CalendarEvent] = servers @@ -93,6 +153,9 @@ func GetEventingServers(eventSource *v1alpha1.EventSource, metrics *eventsourcem if len(eventSource.Spec.Emitter) != 0 { servers := []EventingServer{} for k, v := range eventSource.Spec.Emitter { + if v.Filter != nil { + filters[k] = v.Filter + } servers = append(servers, &emitter.EventListener{EventSourceName: eventSource.Name, EventName: k, EmitterEventSource: v, Metrics: metrics}) } result[apicommon.EmitterEvent] = servers @@ -100,13 +163,39 @@ func GetEventingServers(eventSource *v1alpha1.EventSource, metrics *eventsourcem if len(eventSource.Spec.File) != 0 { servers := []EventingServer{} for k, v := range eventSource.Spec.File { + if v.Filter != nil { + filters[k] = v.Filter + } servers = append(servers, &file.EventListener{EventSourceName: eventSource.Name, EventName: k, FileEventSource: v, Metrics: metrics}) } result[apicommon.FileEvent] = servers } + if len(eventSource.Spec.SFTP) != 0 { + servers := []EventingServer{} + for k, v := range eventSource.Spec.SFTP { + if v.Filter != nil { + filters[k] = v.Filter + } + servers = append(servers, &sftp.EventListener{EventSourceName: eventSource.Name, EventName: k, SFTPEventSource: v, Metrics: metrics}) + } + result[apicommon.SFTPEvent] = servers + } + if len(eventSource.Spec.Gerrit) != 0 { + servers := []EventingServer{} + for k, v := range eventSource.Spec.Gerrit { + if v.Filter != nil { + filters[k] = v.Filter + } + servers = append(servers, &gerrit.EventListener{EventSourceName: eventSource.Name, EventName: k, GerritEventSource: v, Metrics: metrics}) + } + result[apicommon.GerritEvent] = servers + } if len(eventSource.Spec.Github) != 0 { servers := []EventingServer{} for k, v := range eventSource.Spec.Github { + if v.Filter != nil { + filters[k] = v.Filter + } servers = append(servers, &github.EventListener{EventSourceName: eventSource.Name, EventName: k, GithubEventSource: v, Metrics: metrics}) } result[apicommon.GithubEvent] = servers @@ -114,6 +203,9 @@ func GetEventingServers(eventSource *v1alpha1.EventSource, metrics *eventsourcem if len(eventSource.Spec.Gitlab) != 0 { servers := []EventingServer{} for k, v := range eventSource.Spec.Gitlab { + if v.Filter != nil { + filters[k] = v.Filter + } servers = append(servers, &gitlab.EventListener{EventSourceName: eventSource.Name, EventName: k, GitlabEventSource: v, Metrics: metrics}) } result[apicommon.GitlabEvent] = servers @@ 
-121,6 +213,9 @@ func GetEventingServers(eventSource *v1alpha1.EventSource, metrics *eventsourcem if len(eventSource.Spec.HDFS) != 0 { servers := []EventingServer{} for k, v := range eventSource.Spec.HDFS { + if v.Filter != nil { + filters[k] = v.Filter + } servers = append(servers, &hdfs.EventListener{EventSourceName: eventSource.Name, EventName: k, HDFSEventSource: v, Metrics: metrics}) } result[apicommon.HDFSEvent] = servers @@ -128,6 +223,9 @@ func GetEventingServers(eventSource *v1alpha1.EventSource, metrics *eventsourcem if len(eventSource.Spec.Kafka) != 0 { servers := []EventingServer{} for k, v := range eventSource.Spec.Kafka { + if v.Filter != nil { + filters[k] = v.Filter + } servers = append(servers, &kafka.EventListener{EventSourceName: eventSource.Name, EventName: k, KafkaEventSource: v, Metrics: metrics}) } result[apicommon.KafkaEvent] = servers @@ -135,6 +233,9 @@ func GetEventingServers(eventSource *v1alpha1.EventSource, metrics *eventsourcem if len(eventSource.Spec.MQTT) != 0 { servers := []EventingServer{} for k, v := range eventSource.Spec.MQTT { + if v.Filter != nil { + filters[k] = v.Filter + } servers = append(servers, &mqtt.EventListener{EventSourceName: eventSource.Name, EventName: k, MQTTEventSource: v, Metrics: metrics}) } result[apicommon.MQTTEvent] = servers @@ -149,6 +250,9 @@ func GetEventingServers(eventSource *v1alpha1.EventSource, metrics *eventsourcem if len(eventSource.Spec.NATS) != 0 { servers := []EventingServer{} for k, v := range eventSource.Spec.NATS { + if v.Filter != nil { + filters[k] = v.Filter + } servers = append(servers, &nats.EventListener{EventSourceName: eventSource.Name, EventName: k, NATSEventSource: v, Metrics: metrics}) } result[apicommon.NATSEvent] = servers @@ -156,6 +260,9 @@ func GetEventingServers(eventSource *v1alpha1.EventSource, metrics *eventsourcem if len(eventSource.Spec.NSQ) != 0 { servers := []EventingServer{} for k, v := range eventSource.Spec.NSQ { + if v.Filter != nil { + filters[k] = v.Filter + } servers = append(servers, &nsq.EventListener{EventSourceName: eventSource.Name, EventName: k, NSQEventSource: v, Metrics: metrics}) } result[apicommon.NSQEvent] = servers @@ -163,6 +270,9 @@ func GetEventingServers(eventSource *v1alpha1.EventSource, metrics *eventsourcem if len(eventSource.Spec.PubSub) != 0 { servers := []EventingServer{} for k, v := range eventSource.Spec.PubSub { + if v.Filter != nil { + filters[k] = v.Filter + } servers = append(servers, &gcppubsub.EventListener{EventSourceName: eventSource.Name, EventName: k, PubSubEventSource: v, Metrics: metrics}) } result[apicommon.PubSubEvent] = servers @@ -170,13 +280,29 @@ func GetEventingServers(eventSource *v1alpha1.EventSource, metrics *eventsourcem if len(eventSource.Spec.Redis) != 0 { servers := []EventingServer{} for k, v := range eventSource.Spec.Redis { + if v.Filter != nil { + filters[k] = v.Filter + } servers = append(servers, &redis.EventListener{EventSourceName: eventSource.Name, EventName: k, RedisEventSource: v, Metrics: metrics}) } result[apicommon.RedisEvent] = servers } + if len(eventSource.Spec.RedisStream) != 0 { + servers := []EventingServer{} + for k, v := range eventSource.Spec.RedisStream { + if v.Filter != nil { + filters[k] = v.Filter + } + servers = append(servers, &redisstream.EventListener{EventSourceName: eventSource.Name, EventName: k, EventSource: v, Metrics: metrics}) + } + result[apicommon.RedisStreamEvent] = servers + } if len(eventSource.Spec.SNS) != 0 { servers := []EventingServer{} for k, v := range eventSource.Spec.SNS { + if 
v.Filter != nil { + filters[k] = v.Filter + } servers = append(servers, &awssns.EventListener{EventSourceName: eventSource.Name, EventName: k, SNSEventSource: v, Metrics: metrics}) } result[apicommon.SNSEvent] = servers @@ -184,6 +310,9 @@ func GetEventingServers(eventSource *v1alpha1.EventSource, metrics *eventsourcem if len(eventSource.Spec.SQS) != 0 { servers := []EventingServer{} for k, v := range eventSource.Spec.SQS { + if v.Filter != nil { + filters[k] = v.Filter + } servers = append(servers, &awssqs.EventListener{EventSourceName: eventSource.Name, EventName: k, SQSEventSource: v, Metrics: metrics}) } result[apicommon.SQSEvent] = servers @@ -191,6 +320,9 @@ func GetEventingServers(eventSource *v1alpha1.EventSource, metrics *eventsourcem if len(eventSource.Spec.Slack) != 0 { servers := []EventingServer{} for k, v := range eventSource.Spec.Slack { + if v.Filter != nil { + filters[k] = v.Filter + } servers = append(servers, &slack.EventListener{EventSourceName: eventSource.Name, EventName: k, SlackEventSource: v, Metrics: metrics}) } result[apicommon.SlackEvent] = servers @@ -212,7 +344,10 @@ func GetEventingServers(eventSource *v1alpha1.EventSource, metrics *eventsourcem if len(eventSource.Spec.Webhook) != 0 { servers := []EventingServer{} for k, v := range eventSource.Spec.Webhook { - servers = append(servers, &webhook.EventListener{EventSourceName: eventSource.Name, EventName: k, WebhookContext: v, Metrics: metrics}) + if v.Filter != nil { + filters[k] = v.Filter + } + servers = append(servers, &webhook.EventListener{EventSourceName: eventSource.Name, EventName: k, Webhook: v, Metrics: metrics}) } result[apicommon.WebhookEvent] = servers } @@ -226,6 +361,9 @@ func GetEventingServers(eventSource *v1alpha1.EventSource, metrics *eventsourcem if len(eventSource.Spec.Pulsar) != 0 { servers := []EventingServer{} for k, v := range eventSource.Spec.Pulsar { + if v.Filter != nil { + filters[k] = v.Filter + } servers = append(servers, &pulsar.EventListener{EventSourceName: eventSource.Name, EventName: k, PulsarEventSource: v, Metrics: metrics}) } result[apicommon.PulsarEvent] = servers @@ -233,11 +371,14 @@ func GetEventingServers(eventSource *v1alpha1.EventSource, metrics *eventsourcem if len(eventSource.Spec.Generic) != 0 { servers := []EventingServer{} for k, v := range eventSource.Spec.Generic { + if v.Filter != nil { + filters[k] = v.Filter + } servers = append(servers, &generic.EventListener{EventSourceName: eventSource.Name, EventName: k, GenericEventSource: v, Metrics: metrics}) } result[apicommon.GenericEvent] = servers } - return result + return result, filters } // EventSourceAdaptor is the adaptor for eventsource service @@ -247,19 +388,22 @@ type EventSourceAdaptor struct { eventBusSubject string hostname string - eventBusConn eventbusdriver.Connection + eventBusConn eventbuscommon.EventSourceConnection metrics *eventsourcemetrics.Metrics + + cfClient *codefresh.Client } // NewEventSourceAdaptor returns a new EventSourceAdaptor -func NewEventSourceAdaptor(eventSource *v1alpha1.EventSource, eventBusConfig *eventbusv1alpha1.BusConfig, eventBusSubject, hostname string, metrics *eventsourcemetrics.Metrics) *EventSourceAdaptor { +func NewEventSourceAdaptor(eventSource *v1alpha1.EventSource, eventBusConfig *eventbusv1alpha1.BusConfig, eventBusSubject, hostname string, metrics *eventsourcemetrics.Metrics, cfClient *codefresh.Client) *EventSourceAdaptor { return &EventSourceAdaptor{ eventSource: eventSource, eventBusConfig: eventBusConfig, eventBusSubject: eventBusSubject, hostname: 
hostname, metrics: metrics, + cfClient: cfClient, } } @@ -271,67 +415,76 @@ func (e *EventSourceAdaptor) Start(ctx context.Context) error { for _, esType := range apicommon.RecreateStrategyEventSources { recreateTypes[esType] = true } - isRecreatType := false - servers := GetEventingServers(e.eventSource, e.metrics) + isRecreateType := false + servers, filters := GetEventingServers(e.eventSource, e.metrics) for k := range servers { if _, ok := recreateTypes[k]; ok { - isRecreatType = true + isRecreateType = true } // This is based on the presumption that all the events in one // EventSource object use the same type of deployment strategy break } - if !isRecreatType { - return e.run(ctx, servers) + + if !isRecreateType { + return e.run(ctx, servers, filters) } - custerName := fmt.Sprintf("%s-eventsource-%s", e.eventSource.Namespace, e.eventSource.Name) - elector, err := leaderelection.NewEventBusElector(ctx, *e.eventBusConfig, custerName, int(e.eventSource.Spec.GetReplicas())) + clusterName := fmt.Sprintf("%s-eventsource-%s", e.eventSource.Namespace, e.eventSource.Name) + replicas := int(e.eventSource.Spec.GetReplicas()) + leasename := fmt.Sprintf("eventsource-%s", e.eventSource.Name) + + elector, err := leaderelection.NewElector(ctx, *e.eventBusConfig, clusterName, replicas, e.eventSource.Namespace, leasename, e.hostname) if err != nil { log.Errorw("failed to get an elector", zap.Error(err)) return err } + elector.RunOrDie(ctx, leaderelection.LeaderCallbacks{ OnStartedLeading: func(ctx context.Context) { - if err := e.run(ctx, servers); err != nil { - log.Errorw("failed to start", zap.Error(err)) + if err := e.run(ctx, servers, filters); err != nil { + log.Fatalw("failed to start", zap.Error(err)) } }, OnStoppedLeading: func() { - log.Infof("leader lost: %s", e.hostname) + log.Fatalf("leader lost: %s", e.hostname) }, }) return nil } -func (e *EventSourceAdaptor) run(ctx context.Context, servers map[apicommon.EventSourceType][]EventingServer) error { +func (e *EventSourceAdaptor) run(ctx context.Context, servers map[apicommon.EventSourceType][]EventingServer, filters map[string]*v1alpha1.EventSourceFilter) error { logger := logging.FromContext(ctx) logger.Info("Starting event source server...") - clientID := generateClientID(e.hostname) - driver, err := eventbus.GetDriver(ctx, *e.eventBusConfig, e.eventBusSubject, clientID) + driver, err := eventbus.GetEventSourceDriver(ctx, *e.eventBusConfig, e.eventSource.Name, e.eventBusSubject) if err != nil { logger.Errorw("failed to get eventbus driver", zap.Error(err)) + e.cfClient.ReportError(errors.Wrap(err, "failed to get eventbus driver"), codefresh.ErrorContext{ + ObjectMeta: e.eventSource.ObjectMeta, + TypeMeta: e.eventSource.TypeMeta, + }) return err } - if err = common.Connect(&common.DefaultBackoff, func() error { - e.eventBusConn, err = driver.Connect() + if err = common.DoWithRetry(&common.DefaultBackoff, func() error { + err = driver.Initialize() + if err != nil { + return err + } + e.eventBusConn, err = driver.Connect(clientID) return err }); err != nil { logger.Errorw("failed to connect to eventbus", zap.Error(err)) + e.cfClient.ReportError(errors.Wrap(err, "failed to connect to eventbus"), codefresh.ErrorContext{ + ObjectMeta: e.eventSource.ObjectMeta, + TypeMeta: e.eventSource.TypeMeta, + }) return err } defer e.eventBusConn.Close() - namespace := e.eventSource.ObjectMeta.Namespace - cfConfig, err := codefresh.GetCodefreshConfig(ctx, namespace) - if err != nil { - logger.Errorw("failed to get Codefresh config", zap.Error(err)) - 
return err - } - - cctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(ctx) connWG := &sync.WaitGroup{} // Daemon to reconnect @@ -343,7 +496,7 @@ func (e *EventSourceAdaptor) run(ctx context.Context, servers map[apicommon.Even defer ticker.Stop() for { select { - case <-cctx.Done(): + case <-ctx.Done(): logger.Info("exiting eventbus connection daemon...") return case <-ticker.C: @@ -351,12 +504,12 @@ func (e *EventSourceAdaptor) run(ctx context.Context, servers map[apicommon.Even logger.Info("NATS connection lost, reconnecting...") // Regenerate the client ID to avoid the issue that the NATS server still thinks the client is alive. clientID := generateClientID(e.hostname) - driver, err := eventbus.GetDriver(cctx, *e.eventBusConfig, e.eventBusSubject, clientID) + driver, err := eventbus.GetEventSourceDriver(ctx, *e.eventBusConfig, e.eventSource.Name, e.eventBusSubject) if err != nil { logger.Errorw("failed to get eventbus driver during reconnection", zap.Error(err)) continue } - e.eventBusConn, err = driver.Connect() + e.eventBusConn, err = driver.Connect(clientID) if err != nil { logger.Errorw("failed to reconnect to eventbus", zap.Error(err)) continue @@ -371,18 +524,23 @@ func (e *EventSourceAdaptor) run(ctx context.Context, servers map[apicommon.Even for _, ss := range servers { for _, server := range ss { // Validation has been done in eventsource-controller, it's harmless to do it again here. - err := server.ValidateEventSource(cctx) + err := server.ValidateEventSource(ctx) if err != nil { logger.Errorw("Validation failed", zap.Error(err), zap.Any(logging.LabelEventName, server.GetEventName()), zap.Any(logging.LabelEventSourceType, server.GetEventSourceType())) + e.cfClient.ReportError(errors.Wrap(err, "Validation failed"), codefresh.ErrorContext{ + ObjectMeta: e.eventSource.ObjectMeta, + TypeMeta: e.eventSource.TypeMeta, + }) + // Continue starting other event services instead of failing all of them continue } wg.Add(1) go func(s EventingServer) { + defer wg.Done() e.metrics.IncRunningServices(s.GetEventSourceName()) defer e.metrics.DecRunningServices(s.GetEventSourceName()) - defer wg.Done() duration := apicommon.FromString("1s") factor := apicommon.NewAmount("1") jitter := apicommon.NewAmount("30") @@ -392,14 +550,32 @@ func (e *EventSourceAdaptor) run(ctx context.Context, servers map[apicommon.Even Factor: &factor, Jitter: &jitter, } - if err = common.Connect(&backoff, func() error { - return s.StartListening(cctx, func(data []byte) error { + if err = common.DoWithRetry(&backoff, func() error { + return s.StartListening(ctx, func(data []byte, opts ...eventsourcecommon.Option) error { + if filter, ok := filters[s.GetEventName()]; ok { + proceed, err := filterEvent(data, filter) + if err != nil { + logger.Errorw("Failed to filter event", zap.Error(err)) + return nil + } + if !proceed { + logger.Info("Filter condition not met, skip dispatching") + return nil + } + } + event := cloudevents.NewEvent() event.SetID(fmt.Sprintf("%x", uuid.New())) event.SetType(string(s.GetEventSourceType())) event.SetSource(s.GetEventSourceName()) event.SetSubject(s.GetEventName()) event.SetTime(time.Now()) + for _, opt := range opts { + err := opt(&event) + if err != nil { + return err + } + } err := event.SetData(cloudevents.ApplicationJSON, data) if err != nil { return err @@ -408,32 +584,46 @@ func (e *EventSourceAdaptor) run(ctx context.Context, servers map[apicommon.Even if err != nil { return err } + if e.eventBusConn == nil || e.eventBusConn.IsClosed() { - return
errors.New("failed to publish event, eventbus connection closed") + return eventbuscommon.NewEventBusError(fmt.Errorf("failed to publish event, eventbus connection closed")) + } + + msg := eventbuscommon.Message{ + MsgHeader: eventbuscommon.MsgHeader{ + EventSourceName: s.GetEventSourceName(), + EventName: s.GetEventName(), + ID: event.ID(), + }, + Body: eventBody, } - if err = driver.Publish(e.eventBusConn, eventBody); err != nil { - logger.Errorw("failed to publish an event", zap.Error(err), zap.String(logging.LabelEventName, - s.GetEventName()), zap.Any(logging.LabelEventSourceType, s.GetEventSourceType())) + logger.Debugw(string(data), zap.String("eventID", event.ID())) + if err = common.DoWithRetry(&common.DefaultBackoff, func() error { + return e.eventBusConn.Publish(ctx, msg) + }); err != nil { + logger.Errorw("Failed to publish an event", zap.Error(err), zap.String(logging.LabelEventName, + s.GetEventName()), zap.Any(logging.LabelEventSourceType, s.GetEventSourceType()), zap.String("eventID", event.ID())) e.metrics.EventSentFailed(s.GetEventSourceName(), s.GetEventName()) - return err + e.cfClient.ReportError( + errors.Wrapf(err, "failed to publish an event { %s: %s, %s: %s }", + logging.LabelEventName, s.GetEventName(), logging.LabelEventSourceType, s.GetEventSourceType()), + codefresh.ErrorContext{ + ObjectMeta: e.eventSource.ObjectMeta, + TypeMeta: e.eventSource.TypeMeta, + }, + ) + return eventbuscommon.NewEventBusError(err) } - logger.Infow("succeeded to publish an event", zap.String(logging.LabelEventName, + logger.Infow("Succeeded to publish an event", zap.String(logging.LabelEventName, s.GetEventName()), zap.Any(logging.LabelEventSourceType, s.GetEventSourceType()), zap.String("eventID", event.ID())) e.metrics.EventSent(s.GetEventSourceName(), s.GetEventName()) - err = codefresh.ReportEventToCodefresh(eventBody, cfConfig) - if err != nil { - logger.Errorw("failed to report an event to Codefresh", zap.Error(err), - zap.String(logging.LabelEventName, s.GetEventName()), zap.Any(logging.LabelEventSourceType, s.GetEventSourceType())) - } else { - logger.Infow("succeeded to report an event to Codefresh", zap.String(logging.LabelEventName, s.GetEventName()), - zap.Any(logging.LabelEventSourceType, s.GetEventSourceType()), zap.String("eventID", event.ID())) - } + e.cfClient.ReportEvent(event) return nil }) }); err != nil { - logger.Errorw("failed to start listening eventsource", zap.Any(logging.LabelEventSourceType, + logger.Errorw("Failed to start listening eventsource", zap.Any(logging.LabelEventSourceType, s.GetEventSourceType()), zap.Any(logging.LabelEventName, s.GetEventName()), zap.Error(err)) } }(server) @@ -459,14 +649,28 @@ func (e *EventSourceAdaptor) run(ctx context.Context, servers map[apicommon.Even logger.Error("Erroring out, no active event server running") cancel() connWG.Wait() - return errors.New("no active event server running") + return fmt.Errorf("no active event server running") } } } func generateClientID(hostname string) string { - s1 := rand.NewSource(time.Now().UnixNano()) - r1 := rand.New(s1) - clientID := fmt.Sprintf("client-%s-%v", strings.ReplaceAll(hostname, ".", "_"), r1.Intn(1000)) + randomNum, _ := rand.Int(rand.Reader, big.NewInt(int64(1000))) + clientID := fmt.Sprintf("client-%s-%v", strings.ReplaceAll(hostname, ".", "_"), randomNum.Int64()) return clientID } + +func filterEvent(data []byte, filter *v1alpha1.EventSourceFilter) (bool, error) { + dataMap := make(map[string]interface{}) + err := json.Unmarshal(data, &dataMap) + if err != nil { + 
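Editor's note: generateClientID above is part of the same change as the sensor-side client IDs earlier in this diff: math/rand seeded with the wall clock could give two replicas starting in the same nanosecond identical suffixes, so it is swapped for crypto/rand. The pattern in isolation (the hostname is illustrative):

```go
package main

import (
	"crypto/rand"
	"fmt"
	"math/big"
	"strings"
)

func main() {
	hostname := "eventsource-deployment-0.example.local" // illustrative

	// crypto/rand needs no seed and draws from the OS entropy source,
	// so concurrent starters cannot collide the way a time-seeded
	// math/rand generator can.
	randomNum, _ := rand.Int(rand.Reader, big.NewInt(1000))
	clientID := fmt.Sprintf("client-%s-%v", strings.ReplaceAll(hostname, ".", "_"), randomNum.Int64())
	fmt.Println(clientID)
}
```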
+ +func filterEvent(data []byte, filter *v1alpha1.EventSourceFilter) (bool, error) { + dataMap := make(map[string]interface{}) + err := json.Unmarshal(data, &dataMap) + if err != nil { + return false, fmt.Errorf("failed to unmarshal data, %w", err) + } + + params := make(map[string]interface{}) + for key, value := range dataMap { + params[strings.ReplaceAll(key, "-", "_")] = value + } + env := expr.GetFuncMap(params) + return expr.EvalBool(filter.Expression, env) +} diff --git a/eventsources/persist/event_persist.go b/eventsources/persist/event_persist.go index 0d2f6ab5d2..b97eeac1d8 100644 --- a/eventsources/persist/event_persist.go +++ b/eventsources/persist/event_persist.go @@ -4,7 +4,6 @@ import ( "context" "fmt" - "github.com/pkg/errors" v1 "k8s.io/api/core/v1" apierr "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -73,10 +72,10 @@ func (cmp *ConfigMapPersist) IsEnabled() bool { func (cmp *ConfigMapPersist) Save(event *Event) error { if event == nil { - return errors.Errorf("event object is nil") + return fmt.Errorf("event object is nil") } - //Using Connect util func for backoff retry if K8s API returns error - err := common.Connect(&common.DefaultBackoff, func() error { + // Use the DoWithRetry util func for backoff retry if the K8s API returns an error + err := common.DoWithRetry(&common.DefaultBackoff, func() error { cm, err := cmp.kubeClient.CoreV1().ConfigMaps(cmp.namespace).Get(cmp.ctx, cmp.name, metav1.GetOptions{}) if err != nil { if apierr.IsNotFound(err) && cmp.createIfNotExist { diff --git a/eventsources/sources/amqp/start.go b/eventsources/sources/amqp/start.go index b48a01674d..abfc7f87cc 100644 --- a/eventsources/sources/amqp/start.go +++ b/eventsources/sources/amqp/start.go @@ -19,14 +19,17 @@ package amqp import ( "context" "encoding/json" + "fmt" "time" - "github.com/pkg/errors" - amqplib "github.com/streadway/amqp" + "sigs.k8s.io/yaml" + + amqplib "github.com/rabbitmq/amqp091-go" "go.uber.org/zap" "github.com/argoproj/argo-events/common" "github.com/argoproj/argo-events/common/logging" + eventsourcecommon "github.com/argoproj/argo-events/eventsources/common" "github.com/argoproj/argo-events/eventsources/sources" metrics "github.com/argoproj/argo-events/metrics" apicommon "github.com/argoproj/argo-events/pkg/apis/common" @@ -58,7 +61,7 @@ func (el *EventListener) GetEventSourceType() apicommon.EventSourceType { } // StartListening starts listening events -func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte) error) error { +func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error) error { log := logging.FromContext(ctx).
With(logging.LabelEventSourceType, el.GetEventSourceType(), logging.LabelEventName, el.GetEventName()) @@ -67,7 +70,7 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt amqpEventSource := &el.AMQPEventSource var conn *amqplib.Connection - if err := common.Connect(amqpEventSource.ConnectionBackoff, func() error { + if err := common.DoWithRetry(amqpEventSource.ConnectionBackoff, func() error { c := amqplib.Config{ Heartbeat: 10 * time.Second, Locale: "en_US", @@ -75,38 +78,47 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt if amqpEventSource.TLS != nil { tlsConfig, err := common.GetTLSConfig(amqpEventSource.TLS) if err != nil { - return errors.Wrap(err, "failed to get the tls configuration") + return fmt.Errorf("failed to get the tls configuration, %w", err) } c.TLSClientConfig = tlsConfig } if amqpEventSource.Auth != nil { username, err := common.GetSecretFromVolume(amqpEventSource.Auth.Username) if err != nil { - return errors.Wrap(err, "username not founnd") + return fmt.Errorf("username not found, %w", err) } password, err := common.GetSecretFromVolume(amqpEventSource.Auth.Password) if err != nil { - return errors.Wrap(err, "password not founnd") + return fmt.Errorf("password not found, %w", err) } - c.SASL = []amqplib.Authentication{&amqplib.AMQPlainAuth{ + c.SASL = []amqplib.Authentication{&amqplib.PlainAuth{ Username: username, Password: password, }} } var err error - conn, err = amqplib.DialConfig(amqpEventSource.URL, c) + var url string + if amqpEventSource.URLSecret != nil { + url, err = common.GetSecretFromVolume(amqpEventSource.URLSecret) + if err != nil { + return fmt.Errorf("urlSecret not found, %w", err) + } + } else { + url = amqpEventSource.URL + } + conn, err = amqplib.DialConfig(url, c) if err != nil { return err } return nil }); err != nil { - return errors.Wrapf(err, "failed to connect to amqp broker for the event source %s", el.GetEventName()) + return fmt.Errorf("failed to connect to amqp broker for the event source %s, %w", el.GetEventName(), err) } log.Info("opening the server channel...") ch, err := conn.Channel() if err != nil { - return errors.Wrapf(err, "failed to open the channel for the event source %s", el.GetEventName()) + return fmt.Errorf("failed to open the channel for the event source %s, %w", el.GetEventName(), err) } log.Info("checking parameters and set defaults...") @@ -115,7 +127,7 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt log.Info("setting up the delivery channel...") delivery, err := getDelivery(ch, amqpEventSource) if err != nil { - return errors.Wrapf(err, "failed to get the delivery for the event source %s", el.GetEventName()) + return fmt.Errorf("failed to get the delivery for the event source %s, %w", el.GetEventName(), err) } if amqpEventSource.JSONBody { @@ -128,7 +140,7 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt case msg, ok := <-delivery: if !ok { log.Error("failed to read a message, channel might have been closed") - return errors.New("channel might have been closed") + return fmt.Errorf("channel might have been closed") } if err := el.handleOne(amqpEventSource, msg, dispatch, log); err != nil { log.Errorw("failed to process an AMQP message", zap.Error(err)) @@ -144,7 +156,7 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt } } -func (el *EventListener) handleOne(amqpEventSource *v1alpha1.AMQPEventSource, msg amqplib.Delivery, dispatch func([]byte) error, 
log *zap.SugaredLogger) error { +func (el *EventListener) handleOne(amqpEventSource *v1alpha1.AMQPEventSource, msg amqplib.Delivery, dispatch func([]byte, ...eventsourcecommon.Option) error, log *zap.SugaredLogger) error { defer func(start time.Time) { el.Metrics.EventProcessingDuration(el.GetEventSourceName(), el.GetEventName(), float64(time.Since(start)/time.Millisecond)) }(time.Now()) @@ -174,12 +186,12 @@ func (el *EventListener) handleOne(amqpEventSource *v1alpha1.AMQPEventSource, ms bodyBytes, err := json.Marshal(body) if err != nil { - return errors.Wrapf(err, "failed to marshal the message, message-id: %s", msg.MessageId) + return fmt.Errorf("failed to marshal the message, message-id: %s, %w", msg.MessageId, err) } log.Info("dispatching event ...") if err = dispatch(bodyBytes); err != nil { - return errors.Wrap(err, "failed to dispatch AMQP event") + return fmt.Errorf("failed to dispatch AMQP event, %w", err) } return nil } @@ -235,7 +247,11 @@ func getDelivery(ch *amqplib.Channel, eventSource *v1alpha1.AMQPEventSource) (<- nil, ) if err != nil { - return nil, errors.Errorf("failed to declare exchange with name %s and type %s. err: %+v", eventSource.ExchangeName, eventSource.ExchangeType, err) + return nil, fmt.Errorf("failed to declare exchange with name %s and type %s. err: %w", eventSource.ExchangeName, eventSource.ExchangeType, err) + } + optionalArguments, err := parseYamlTable(eventSource.QueueDeclare.Arguments) + if err != nil { + return nil, fmt.Errorf("failed to parse optional queue declare table arguments from Yaml string: %w", err) } q, err := ch.QueueDeclare( @@ -244,10 +260,10 @@ func getDelivery(ch *amqplib.Channel, eventSource *v1alpha1.AMQPEventSource) (<- eventSource.QueueDeclare.AutoDelete, eventSource.QueueDeclare.Exclusive, eventSource.QueueDeclare.NoWait, - nil, + optionalArguments, ) if err != nil { - return nil, errors.Errorf("failed to declare queue: %s", err) + return nil, fmt.Errorf("failed to declare queue: %w", err) } err = ch.QueueBind( @@ -258,7 +274,7 @@ func getDelivery(ch *amqplib.Channel, eventSource *v1alpha1.AMQPEventSource) (<- nil, ) if err != nil { - return nil, errors.Errorf("failed to bind %s exchange '%s' to queue with routingKey: %s: %s", eventSource.ExchangeType, eventSource.ExchangeName, eventSource.RoutingKey, err) + return nil, fmt.Errorf("failed to bind %s exchange '%s' to queue with routingKey: %s: %w", eventSource.ExchangeType, eventSource.ExchangeName, eventSource.RoutingKey, err) } delivery, err := ch.Consume( @@ -271,7 +287,20 @@ func getDelivery(ch *amqplib.Channel, eventSource *v1alpha1.AMQPEventSource) (<- nil, ) if err != nil { - return nil, errors.Errorf("failed to begin consuming messages: %s", err) + return nil, fmt.Errorf("failed to begin consuming messages: %w", err) } return delivery, nil } + +func parseYamlTable(argString string) (amqplib.Table, error) { + if argString == "" { + return nil, nil + } + var table amqplib.Table + args := []byte(argString) + err := yaml.Unmarshal(args, &table) + if err != nil { + return nil, fmt.Errorf("unmarshalling Yaml to Table type. Args: %s. Err: %w", argString, err) + } + return table, nil +} diff --git a/eventsources/sources/amqp/start_test.go b/eventsources/sources/amqp/start_test.go new file mode 100644 index 0000000000..5381182618 --- /dev/null +++ b/eventsources/sources/amqp/start_test.go @@ -0,0 +1,42 @@ +/* +Copyright 2018 BlackRock, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package amqp + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParseYamlTable(t *testing.T) { + table, err := parseYamlTable("") + assert.Nil(t, err) + assert.Nil(t, table) + table, err = parseYamlTable(`:noKey`) + assert.NotNil(t, err) + assert.Nil(t, table) + table, err = parseYamlTable("x-queue-type: quorum") + assert.Nil(t, err) + assert.NotNil(t, table) + assert.True(t, len(table) == 1) + table, err = parseYamlTable("key-one: thing1\nkey-two: thing2") + assert.Nil(t, err) + assert.NotNil(t, table) + assert.True(t, len(table) == 2) + assert.Equal(t, "thing1", table["key-one"].(string)) + assert.Equal(t, "thing2", table["key-two"].(string)) +} diff --git a/eventsources/sources/amqp/validate.go b/eventsources/sources/amqp/validate.go index bb82098c05..02a0963ca2 100644 --- a/eventsources/sources/amqp/validate.go +++ b/eventsources/sources/amqp/validate.go @@ -18,11 +18,11 @@ package amqp import ( "context" + "fmt" "github.com/argoproj/argo-events/common" apicommon "github.com/argoproj/argo-events/pkg/apis/common" "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" - "github.com/pkg/errors" ) // ValidateEventSource validates gateway event source @@ -34,17 +34,17 @@ func validate(eventSource *v1alpha1.AMQPEventSource) error { if eventSource == nil { return common.ErrNilEventSource } - if eventSource.URL == "" { - return errors.New("url must be specified") + if eventSource.URL == "" && eventSource.URLSecret == nil { + return fmt.Errorf("either url or urlSecret must be specified") } - if eventSource.RoutingKey == "" { - return errors.New("routing key must be specified") + if eventSource.URL != "" && eventSource.URLSecret != nil { + return fmt.Errorf("only one of url or urlSecret can be specified") } - if eventSource.ExchangeName == "" { - return errors.New("exchange name must be specified") + if eventSource.RoutingKey == "" { + return fmt.Errorf("routing key must be specified") } if eventSource.ExchangeType == "" { - return errors.New("exchange type must be specified") + return fmt.Errorf("exchange type must be specified") } if eventSource.TLS != nil { return apicommon.ValidateTLSConfig(eventSource.TLS) diff --git a/eventsources/sources/amqp/validate_test.go b/eventsources/sources/amqp/validate_test.go index 4f45a6edae..0e54b1e5c5 100644 --- a/eventsources/sources/amqp/validate_test.go +++ b/eventsources/sources/amqp/validate_test.go @@ -19,7 +19,7 @@ package amqp import ( "context" "fmt" - "io/ioutil" + "os" "testing" "github.com/argoproj/argo-events/eventsources/sources" @@ -33,9 +33,9 @@ func TestValidateEventSource(t *testing.T) { err := listener.ValidateEventSource(context.Background()) assert.Error(t, err) - assert.Equal(t, "url must be specified", err.Error()) + assert.Equal(t, "either url or urlSecret must be specified", err.Error()) - content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "amqp.yaml")) + content, err := os.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "amqp.yaml")) assert.Nil(t, err) var eventSource *v1alpha1.EventSource diff --git a/eventsources/sources/awssns/start.go 
b/eventsources/sources/awssns/start.go index 5dcadb9aca..bc91749c48 100644 --- a/eventsources/sources/awssns/start.go +++ b/eventsources/sources/awssns/start.go @@ -23,19 +23,22 @@ import ( "encoding/base64" "encoding/json" "encoding/pem" - "io/ioutil" + "fmt" + "io" "net/http" + "net/url" "reflect" "regexp" "time" + "github.com/aws/aws-sdk-go/aws" snslib "github.com/aws/aws-sdk-go/service/sns" "github.com/ghodss/yaml" - "github.com/pkg/errors" "go.uber.org/zap" "github.com/argoproj/argo-events/common" "github.com/argoproj/argo-events/common/logging" + eventsourcecommon "github.com/argoproj/argo-events/eventsources/common" commonaws "github.com/argoproj/argo-events/eventsources/common/aws" "github.com/argoproj/argo-events/eventsources/common/webhook" "github.com/argoproj/argo-events/eventsources/sources" @@ -113,7 +116,8 @@ func (router *Router) HandleRoute(writer http.ResponseWriter, request *http.Requ route.Metrics.EventProcessingDuration(route.EventSourceName, route.EventName, float64(time.Since(start)/time.Millisecond)) }(time.Now()) - body, err := ioutil.ReadAll(request.Body) + request.Body = http.MaxBytesReader(writer, request.Body, route.Context.GetMaxPayloadSize()) + body, err := io.ReadAll(request.Body) if err != nil { logger.Errorw("failed to parse the request body", zap.Error(err)) common.SendErrorResponse(writer, err.Error()) @@ -207,12 +211,17 @@ func (router *Router) PostActivate() error { snsEventSource := router.eventSource - awsSession, err := commonaws.CreateAWSSessionWithCredsInVolume(snsEventSource.Region, snsEventSource.RoleARN, snsEventSource.AccessKey, snsEventSource.SecretKey) + awsSession, err := commonaws.CreateAWSSessionWithCredsInVolume(snsEventSource.Region, snsEventSource.RoleARN, snsEventSource.AccessKey, snsEventSource.SecretKey, nil) if err != nil { return err } - router.session = snslib.New(awsSession) + if snsEventSource.Endpoint == "" { + router.session = snslib.New(awsSession) + } else { + router.session = snslib.New(awsSession, &aws.Config{Endpoint: &snsEventSource.Endpoint, Region: &snsEventSource.Region}) + } + formattedURL := common.FormattedURL(snsEventSource.Webhook.URL, snsEventSource.Webhook.Endpoint) if _, err := router.session.Subscribe(&snslib.SubscribeInput{ Endpoint: &formattedURL, @@ -267,7 +276,7 @@ func (el *EventListener) GetEventSourceType() apicommon.EventSourceType { } // StartListening starts an SNS event source -func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte) error) error { +func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error) error { logger := logging.FromContext(ctx). 
With(logging.LabelEventSourceType, el.GetEventSourceType(), logging.LabelEventName, el.GetEventName()) @@ -284,36 +293,56 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt }, controller, dispatch) } +func (m *httpNotification) verifySigningCertUrl() error { + regexSigningCertHost := `^sns\.[a-zA-Z0-9\-]{3,}\.amazonaws\.com(\.cn)?$` + regex := regexp.MustCompile(regexSigningCertHost) + url, err := url.Parse(m.SigningCertURL) + if err != nil { + return fmt.Errorf("SigningCertURL is not a valid URL, %w", err) + } + if !regex.MatchString(url.Hostname()) { + return fmt.Errorf("SigningCertURL hostname `%s` does not match `%s`", url.Hostname(), regexSigningCertHost) + } + if url.Scheme != "https" { + return fmt.Errorf("SigningCertURL is not using https") + } + return nil +} + func (m *httpNotification) verify() error { msgSig, err := base64.StdEncoding.DecodeString(m.Signature) if err != nil { - return errors.Wrap(err, "failed to base64 decode signature") + return fmt.Errorf("failed to base64 decode signature, %w", err) + } + + if err := m.verifySigningCertUrl(); err != nil { + return fmt.Errorf("failed to verify SigningCertURL, %w", err) } res, err := http.Get(m.SigningCertURL) if err != nil { - return errors.Wrap(err, "failed to fetch signing cert") + return fmt.Errorf("failed to fetch signing cert, %w", err) } defer res.Body.Close() - body, err := ioutil.ReadAll(res.Body) + body, err := io.ReadAll(io.LimitReader(res.Body, 65*1024)) if err != nil { - return errors.Wrap(err, "failed to read signing cert body") + return fmt.Errorf("failed to read signing cert body, %w", err) } p, _ := pem.Decode(body) if p == nil { - return errors.New("nothing found in pem encoded bytes") + return fmt.Errorf("nothing found in pem encoded bytes") } cert, err := x509.ParseCertificate(p.Bytes) if err != nil { - return errors.Wrap(err, "failed to parse signing cert") + return fmt.Errorf("failed to parse signing cert, %w", err) } err = cert.CheckSignature(x509.SHA1WithRSA, m.sigSerialized(), msgSig) if err != nil { - return errors.Wrap(err, "message signature check error") + return fmt.Errorf("message signature check error, %w", err) } return nil diff --git a/eventsources/sources/awssns/start_test.go b/eventsources/sources/awssns/start_test.go new file mode 100644 index 0000000000..d8f8b8b7ba --- /dev/null +++ b/eventsources/sources/awssns/start_test.go @@ -0,0 +1,47 @@ +/* +Copyright 2018 BlackRock, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package awssns + +import ( + "testing" +) + +func Test_httpNotification_verifySigningCertUrl(t *testing.T) { + type fields struct { + SigningCertURL string + } + tests := map[string]struct { + fields fields + wantErr bool + }{ + "valid": {fields{"https://sns.us-west-2.amazonaws.com/SimpleNotificationService-123.pem"}, false}, + "without https": {fields{"http://sns.us-west-2.amazonaws.com/SimpleNotificationService-123.pem"}, true}, + "invalid hostname": {fields{"https://sns.us-west-2.amazonaws-malicious.com/SimpleNotificationService-123.pem"}, true}, + "invalid subdomain": {fields{"https://other.us-west-2.amazonaws.com/SimpleNotificationService-123.pem"}, true}, + } + for name, tt := range tests { + name, tt := name, tt + t.Run(name, func(t *testing.T) { + m := &httpNotification{ + SigningCertURL: tt.fields.SigningCertURL, + } + if err := m.verifySigningCertUrl(); (err != nil) != tt.wantErr { + t.Errorf("httpNotification.verifySigningCertUrl() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} diff --git a/eventsources/sources/awssns/validate_test.go b/eventsources/sources/awssns/validate_test.go index 9617ec5853..be23c558df 100644 --- a/eventsources/sources/awssns/validate_test.go +++ b/eventsources/sources/awssns/validate_test.go @@ -19,7 +19,7 @@ package awssns import ( "context" "fmt" - "io/ioutil" + "os" "testing" "github.com/ghodss/yaml" @@ -36,7 +36,7 @@ func TestValidateEventSource(t *testing.T) { assert.Error(t, err) assert.Equal(t, "must specify topic arn", err.Error()) - content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "aws-sns.yaml")) + content, err := os.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "aws-sns.yaml")) assert.Nil(t, err) var eventSource *v1alpha1.EventSource diff --git a/eventsources/sources/awssqs/start.go b/eventsources/sources/awssqs/start.go index fe43577742..e83c239ec1 100644 --- a/eventsources/sources/awssqs/start.go +++ b/eventsources/sources/awssqs/start.go @@ -19,15 +19,17 @@ package awssqs import ( "context" "encoding/json" + "fmt" "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/session" sqslib "github.com/aws/aws-sdk-go/service/sqs" - "github.com/pkg/errors" "go.uber.org/zap" "github.com/argoproj/argo-events/common/logging" + eventsourcecommon "github.com/argoproj/argo-events/eventsources/common" awscommon "github.com/argoproj/argo-events/eventsources/common/aws" "github.com/argoproj/argo-events/eventsources/sources" metrics "github.com/argoproj/argo-events/metrics" @@ -60,22 +62,18 @@ func (el *EventListener) GetEventSourceType() apicommon.EventSourceType { } // StartListening starts listening events -func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte) error) error { +func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error) error { log := logging.FromContext(ctx). 
With(logging.LabelEventSourceType, el.GetEventSourceType(), logging.LabelEventName, el.GetEventName()) log.Info("started processing the AWS SQS event source...") defer sources.Recover(el.GetEventName()) sqsEventSource := &el.SQSEventSource - var awsSession *session.Session - awsSession, err := awscommon.CreateAWSSessionWithCredsInVolume(sqsEventSource.Region, sqsEventSource.RoleARN, sqsEventSource.AccessKey, sqsEventSource.SecretKey) + sqsClient, err := el.createSqsClient() if err != nil { - log.Errorw("Error creating AWS credentials", zap.Error(err)) - return errors.Wrapf(err, "failed to create aws session for %s", el.GetEventName()) + return err } - sqsClient := sqslib.New(awsSession) - log.Info("fetching queue url...") getQueueURLInput := &sqslib.GetQueueUrlInput{ QueueName: &sqsEventSource.Queue, @@ -87,7 +85,7 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt queueURL, err := sqsClient.GetQueueUrl(getQueueURLInput) if err != nil { log.Errorw("Error getting SQS Queue URL", zap.Error(err)) - return errors.Wrapf(err, "failed to get the queue url for %s", el.GetEventName()) + return fmt.Errorf("failed to get the queue url for %s, %w", el.GetEventName(), err) } if sqsEventSource.JSONBody { @@ -105,24 +103,45 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt messages, err := fetchMessages(ctx, sqsClient, *queueURL.QueueUrl, 10, sqsEventSource.WaitTimeSeconds) if err != nil { log.Errorw("failed to get messages from SQS", zap.Error(err)) + awsError, ok := err.(awserr.Error) + if ok && awsError.Code() == "ExpiredToken" && el.SQSEventSource.SessionToken != nil { + log.Info("credentials expired, reading credentials again") + newSqsClient, err := el.createSqsClient() + if err != nil { + log.Errorw("Error creating SQS client", zap.Error(err)) + } else if newSqsClient != nil { + sqsClient = newSqsClient + } + } + time.Sleep(2 * time.Second) continue } for _, m := range messages { - el.processMessage(ctx, m, dispatch, func() { + el.processMessage(m, dispatch, func() { _, err = sqsClient.DeleteMessage(&sqslib.DeleteMessageInput{ QueueUrl: queueURL.QueueUrl, ReceiptHandle: m.ReceiptHandle, }) if err != nil { log.Errorw("Failed to delete message", zap.Error(err)) + awsError, ok := err.(awserr.Error) + if ok && awsError.Code() == "ExpiredToken" && el.SQSEventSource.SessionToken != nil { + log.Info("credentials expired, reading credentials again") + newSqsClient, err := el.createSqsClient() + if err != nil { + log.Errorw("Error creating SQS client", zap.Error(err)) + } else if newSqsClient != nil { + sqsClient = newSqsClient + } + } } }, log) } } } -func (el *EventListener) processMessage(ctx context.Context, message *sqslib.Message, dispatch func([]byte) error, ack func(), log *zap.SugaredLogger) { +func (el *EventListener) processMessage(message *sqslib.Message, dispatch func([]byte, ...eventsourcecommon.Option) error, ack func(), log *zap.SugaredLogger) { defer func(start time.Time) { el.Metrics.EventProcessingDuration(el.GetEventSourceName(), el.GetEventName(), float64(time.Since(start)/time.Millisecond)) }(time.Now()) @@ -142,7 +161,10 @@ func (el *EventListener) processMessage(ctx context.Context, message *sqslib.Mes if err != nil { log.Errorw("failed to marshal event data, will process next message...", zap.Error(err)) el.Metrics.EventProcessingFailed(el.GetEventSourceName(), el.GetEventName()) - ack() + // Don't ack if a DLQ is configured to allow to forward the message to the DLQ + if !el.SQSEventSource.DLQ { + ack() + } return 
} if err = dispatch(eventBytes); err != nil { @@ -175,3 +197,28 @@ func fetchMessages(ctx context.Context, q *sqslib.SQS, url string, maxSize, wait } return result.Messages, nil } + +func (el *EventListener) createAWSSession() (*session.Session, error) { + sqsEventSource := &el.SQSEventSource + awsSession, err := awscommon.CreateAWSSessionWithCredsInVolume(sqsEventSource.Region, sqsEventSource.RoleARN, sqsEventSource.AccessKey, sqsEventSource.SecretKey, sqsEventSource.SessionToken) + if err != nil { + return nil, fmt.Errorf("failed to create aws session for %s, %w", el.GetEventName(), err) + } + return awsSession, nil +} + +func (el *EventListener) createSqsClient() (*sqslib.SQS, error) { + awsSession, err := el.createAWSSession() + if err != nil { + return nil, err + } + + var sqsClient *sqslib.SQS + if el.SQSEventSource.Endpoint == "" { + sqsClient = sqslib.New(awsSession) + } else { + sqsClient = sqslib.New(awsSession, &aws.Config{Endpoint: &el.SQSEventSource.Endpoint, Region: &el.SQSEventSource.Region}) + } + + return sqsClient, nil +} diff --git a/eventsources/sources/awssqs/validate_test.go b/eventsources/sources/awssqs/validate_test.go index 4a849cc625..04c8cc2982 100644 --- a/eventsources/sources/awssqs/validate_test.go +++ b/eventsources/sources/awssqs/validate_test.go @@ -19,7 +19,7 @@ package awssqs import ( "context" "fmt" - "io/ioutil" + "os" "testing" "github.com/ghodss/yaml" @@ -40,7 +40,7 @@ func TestValidateEventSource(t *testing.T) { assert.Error(t, err) assert.Equal(t, "must specify queue name", err.Error()) - content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "aws-sqs.yaml")) + content, err := os.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "aws-sqs.yaml")) assert.Nil(t, err) var eventSource *v1alpha1.EventSource diff --git a/eventsources/sources/azureeventshub/start.go b/eventsources/sources/azureeventshub/start.go index 0ff867dee9..ac0e1cc407 100644 --- a/eventsources/sources/azureeventshub/start.go +++ b/eventsources/sources/azureeventshub/start.go @@ -23,11 +23,11 @@ import ( "time" eventhub "github.com/Azure/azure-event-hubs-go/v3" - "github.com/pkg/errors" "go.uber.org/zap" "github.com/argoproj/argo-events/common" "github.com/argoproj/argo-events/common/logging" + eventsourcecommon "github.com/argoproj/argo-events/eventsources/common" "github.com/argoproj/argo-events/eventsources/sources" metrics "github.com/argoproj/argo-events/metrics" apicommon "github.com/argoproj/argo-events/pkg/apis/common" @@ -59,7 +59,7 @@ func (el *EventListener) GetEventSourceType() apicommon.EventSourceType { } // StartListening starts listening events -func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte) error) error { +func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error) error { log := logging.FromContext(ctx). 
With(logging.LabelEventSourceType, el.GetEventSourceType(), logging.LabelEventName, el.GetEventName()) log.Info("started processing the Azure Events Hub event source...") @@ -69,13 +69,13 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt log.Info("retrieving the shared access key name...") sharedAccessKeyName, err := common.GetSecretFromVolume(hubEventSource.SharedAccessKeyName) if err != nil { - return errors.Wrapf(err, "failed to retrieve the shared access key name from secret %s", hubEventSource.SharedAccessKeyName.Name) + return fmt.Errorf("failed to retrieve the shared access key name from secret %s, %w", hubEventSource.SharedAccessKeyName.Name, err) } log.Info("retrieving the shared access key...") sharedAccessKey, err := common.GetSecretFromVolume(hubEventSource.SharedAccessKey) if err != nil { - return errors.Wrapf(err, "failed to retrieve the shared access key from secret %s", hubEventSource.SharedAccessKey.Name) + return fmt.Errorf("failed to retrieve the shared access key from secret %s, %w", hubEventSource.SharedAccessKey.Name, err) } endpoint := fmt.Sprintf("Endpoint=sb://%s/;SharedAccessKeyName=%s;SharedAccessKey=%s;EntityPath=%s", hubEventSource.FQDN, sharedAccessKeyName, sharedAccessKey, hubEventSource.HubName) @@ -83,7 +83,7 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt log.Info("connecting to the hub...") hub, err := eventhub.NewHubFromConnectionString(endpoint) if err != nil { - return errors.Wrapf(err, "failed to connect to the hub %s", hubEventSource.HubName) + return fmt.Errorf("failed to connect to the hub %s, %w", hubEventSource.HubName, err) } handler := func(c context.Context, event *eventhub.Event) error { @@ -105,7 +105,7 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt eventBytes, err := json.Marshal(eventData) if err != nil { el.Metrics.EventProcessingFailed(el.GetEventSourceName(), el.GetEventName()) - return errors.Wrapf(err, "failed to marshal the event data for event source %s and message id %s", el.GetEventName(), event.ID) + return fmt.Errorf("failed to marshal the event data for event source %s and message id %s, %w", el.GetEventName(), event.ID, err) } log.Info("dispatching the event to eventbus...") @@ -121,21 +121,21 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt log.Info("gathering the hub runtime information...") runtimeInfo, err := hub.GetRuntimeInformation(ctx) if err != nil { - return errors.Wrapf(err, "failed to get the hub runtime information for %s", el.GetEventName()) + return fmt.Errorf("failed to get the hub runtime information for %s, %w", el.GetEventName(), err) } if runtimeInfo == nil { - return errors.Wrapf(err, "runtime information is not available for %s", el.GetEventName()) + return fmt.Errorf("runtime information is not available for %s", el.GetEventName()) } if runtimeInfo.PartitionIDs == nil { - return errors.Wrapf(err, "no partition ids are available for %s", el.GetEventName()) + return fmt.Errorf("no partition ids are available for %s", el.GetEventName()) } log.Info("handling the partitions...") for _, partitionID := range runtimeInfo.PartitionIDs { if _, err := hub.Receive(ctx, partitionID, handler, eventhub.ReceiveWithLatestOffset()); err != nil { - return errors.Wrapf(err, "failed to receive events from partition %s, %w", partitionID) + return fmt.Errorf("failed to receive events from partition %s, %w", partitionID, err) } } diff --git 
a/eventsources/sources/azureeventshub/validate.go b/eventsources/sources/azureeventshub/validate.go index 5f4f79db33..cbfbf2ca98 100644 --- a/eventsources/sources/azureeventshub/validate.go +++ b/eventsources/sources/azureeventshub/validate.go @@ -18,10 +18,10 @@ package azureeventshub import ( "context" + "fmt" "github.com/argoproj/argo-events/common" "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" - "github.com/pkg/errors" ) // ValidateEventSource validates azure events hub event source @@ -34,16 +34,16 @@ func validate(eventSource *v1alpha1.AzureEventsHubEventSource) error { return common.ErrNilEventSource } if eventSource.FQDN == "" { - return errors.New("FQDN is not specified") + return fmt.Errorf("FQDN is not specified") } if eventSource.HubName == "" { - return errors.New("hub name/path is not specified") + return fmt.Errorf("hub name/path is not specified") } if eventSource.SharedAccessKey == nil { - return errors.New("SharedAccessKey is not specified") + return fmt.Errorf("SharedAccessKey is not specified") } if eventSource.SharedAccessKeyName == nil { - return errors.New("SharedAccessKeyName is not specified") + return fmt.Errorf("SharedAccessKeyName is not specified") } return nil } diff --git a/eventsources/sources/azureeventshub/validate_test.go b/eventsources/sources/azureeventshub/validate_test.go index d5a26f043a..dbbf9b4b0a 100644 --- a/eventsources/sources/azureeventshub/validate_test.go +++ b/eventsources/sources/azureeventshub/validate_test.go @@ -19,7 +19,7 @@ package azureeventshub import ( "context" "fmt" - "io/ioutil" + "os" "testing" "github.com/ghodss/yaml" @@ -36,7 +36,7 @@ func TestValidateEventSource(t *testing.T) { assert.Error(t, err) assert.Equal(t, "FQDN is not specified", err.Error()) - content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "azure-events-hub.yaml")) + content, err := os.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "azure-events-hub.yaml")) assert.Nil(t, err) var eventSource *v1alpha1.EventSource diff --git a/eventsources/sources/azurequeuestorage/start.go b/eventsources/sources/azurequeuestorage/start.go new file mode 100644 index 0000000000..a548dfbfd8 --- /dev/null +++ b/eventsources/sources/azurequeuestorage/start.go @@ -0,0 +1,172 @@ +package azurequeuestorage + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + "github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue" + "go.uber.org/zap" + + "github.com/argoproj/argo-events/common" + "github.com/argoproj/argo-events/common/logging" + eventsourcecommon "github.com/argoproj/argo-events/eventsources/common" + "github.com/argoproj/argo-events/eventsources/sources" + metrics "github.com/argoproj/argo-events/metrics" + apicommon "github.com/argoproj/argo-events/pkg/apis/common" + "github.com/argoproj/argo-events/pkg/apis/events" + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" +) + +// EventListener implements Eventing for azure queue storage event source +type EventListener struct { + EventSourceName string + EventName string + AzureQueueStorageEventSource v1alpha1.AzureQueueStorageEventSource + Metrics *metrics.Metrics +} + +// GetEventSourceName returns name of event source +func (el *EventListener) GetEventSourceName() string { + return el.EventSourceName +} + +// GetEventName returns name of event +func (el *EventListener) GetEventName() string { + return el.EventName +} + +// GetEventSourceType return type of event server +func (el 
*EventListener) GetEventSourceType() apicommon.EventSourceType { + return apicommon.AzureQueueStorage +} + +// StartListening starts listening events +func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error) error { + log := logging.FromContext(ctx). + With(logging.LabelEventSourceType, el.GetEventSourceType(), logging.LabelEventName, el.GetEventName()) + + log.Info("started processing the Azure Queue Storage event source...") + defer sources.Recover(el.GetEventName()) + + queueStorageEventSource := &el.AzureQueueStorageEventSource + var client *azqueue.ServiceClient + // if connectionString is set then use it + // otherwise try to connect via Azure Active Directory (AAD) with storageAccountName + if queueStorageEventSource.ConnectionString != nil { + connStr, err := common.GetSecretFromVolume(queueStorageEventSource.ConnectionString) + if err != nil { + log.With("connection-string", queueStorageEventSource.ConnectionString.Name).Errorw("failed to retrieve connection string from secret", zap.Error(err)) + return err + } + + log.Info("connecting to azure queue storage with connection string...") + client, err = azqueue.NewServiceClientFromConnectionString(connStr, nil) + if err != nil { + log.Errorw("failed to create a service client", zap.Error(err)) + return err + } + } else { + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Errorw("failed to create DefaultAzureCredential", zap.Error(err)) + return err + } + log.Info("connecting to azure queue storage with AAD credentials...") + serviceURL := fmt.Sprintf("https://%s.queue.core.windows.net/", queueStorageEventSource.StorageAccountName) + client, err = azqueue.NewServiceClient(serviceURL, cred, nil) + if err != nil { + log.Errorw("failed to create a service client", zap.Error(err)) + return err + } + } + + queueClient := client.NewQueueClient(el.AzureQueueStorageEventSource.QueueName) + if queueStorageEventSource.JSONBody { + log.Info("assuming all events have a json body...") + } + var numMessages int32 = 10 + var visibilityTimeout int32 = 120 + var waitTime int32 = 3 // Defaults to 3 seconds + if el.AzureQueueStorageEventSource.WaitTimeInSeconds != nil { + waitTime = *el.AzureQueueStorageEventSource.WaitTimeInSeconds + } + log.Info("listening for messages on the queue...") + for { + select { + case <-ctx.Done(): + log.Info("exiting AQS event listener...") + return nil + default: + } + log.Info("dequeuing messages...") + messages, err := queueClient.DequeueMessages(ctx, &azqueue.DequeueMessagesOptions{ + NumberOfMessages: &numMessages, + VisibilityTimeout: &visibilityTimeout, + }) + if err != nil { + log.Errorw("failed to get messages from AQS", zap.Error(err)) + time.Sleep(time.Second) + continue + } + for _, m := range messages.Messages { + el.processMessage(m, dispatch, func() { + _, err = queueClient.DeleteMessage(ctx, *m.MessageID, *m.PopReceipt, &azqueue.DeleteMessageOptions{}) + if err != nil { + log.Errorw("Failed to delete message", zap.Error(err)) + } + }, log) + } + if len(messages.Messages) == 0 { + time.Sleep(time.Second * time.Duration(waitTime)) + } + } +} + +func (el *EventListener) processMessage(message *azqueue.DequeuedMessage, dispatch func([]byte, ...eventsourcecommon.Option) error, ack func(), log *zap.SugaredLogger) { + defer func(start time.Time) { + el.Metrics.EventProcessingDuration(el.GetEventSourceName(), el.GetEventName(), float64(time.Since(start)/time.Millisecond)) + }(time.Now()) + data := 
&events.AzureQueueStorageEventData{ + MessageID: *message.MessageID, + InsertionTime: *message.InsertionTime, + Metadata: el.AzureQueueStorageEventSource.Metadata, + } + body := []byte(*message.MessageText) + if el.AzureQueueStorageEventSource.DecodeMessage { + rawDecodedText, err := base64.URLEncoding.DecodeString(*message.MessageText) + if err != nil { + log.Errorw("failed to base64 decode message...", zap.Error(err)) + el.Metrics.EventProcessingFailed(el.GetEventSourceName(), el.GetEventName()) + if !el.AzureQueueStorageEventSource.DLQ { + ack() + } + return + } + body = rawDecodedText + } + if el.AzureQueueStorageEventSource.JSONBody { + data.Body = (*json.RawMessage)(&body) + } else { + data.Body = body + } + eventBytes, err := json.Marshal(data) + if err != nil { + log.Errorw("failed to marshal event data, will process next message...", zap.Error(err)) + el.Metrics.EventProcessingFailed(el.GetEventSourceName(), el.GetEventName()) + // Don't ack if a DLQ is configured to allow to forward the message to the DLQ + if !el.AzureQueueStorageEventSource.DLQ { + ack() + } + return + } + if err = dispatch(eventBytes); err != nil { + log.Errorw("failed to dispatch azure queue storage event", zap.Error(err)) + el.Metrics.EventProcessingFailed(el.GetEventSourceName(), el.GetEventName()) + } else { + ack() + } +} diff --git a/eventsources/sources/azurequeuestorage/validate.go b/eventsources/sources/azurequeuestorage/validate.go new file mode 100644 index 0000000000..a59fbe9f79 --- /dev/null +++ b/eventsources/sources/azurequeuestorage/validate.go @@ -0,0 +1,43 @@ +/* +Copyright 2018 BlackRock, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azurequeuestorage + +import ( + "context" + "fmt" + + "github.com/argoproj/argo-events/common" + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" +) + +// ValidateEventSource validates azure queue storage event source +func (listener *EventListener) ValidateEventSource(ctx context.Context) error { + return validate(&listener.AzureQueueStorageEventSource) +} + +func validate(eventSource *v1alpha1.AzureQueueStorageEventSource) error { + if eventSource == nil { + return common.ErrNilEventSource + } + if eventSource.ConnectionString == nil && eventSource.StorageAccountName == "" { + return fmt.Errorf("must specify connection string or storageAccountName") + } + if eventSource.QueueName == "" { + return fmt.Errorf("must specify queue name") + } + return nil +} diff --git a/eventsources/sources/azurequeuestorage/validate_test.go b/eventsources/sources/azurequeuestorage/validate_test.go new file mode 100644 index 0000000000..58c9cd2e17 --- /dev/null +++ b/eventsources/sources/azurequeuestorage/validate_test.go @@ -0,0 +1,54 @@ +/* +Copyright 2018 BlackRock, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azurequeuestorage + +import ( + "context" + "fmt" + "os" + "testing" + + "github.com/ghodss/yaml" + "github.com/stretchr/testify/assert" + + "github.com/argoproj/argo-events/eventsources/sources" + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" +) + +func TestValidateEventSource(t *testing.T) { + listener := &EventListener{} + + err := listener.ValidateEventSource(context.Background()) + assert.Error(t, err) + assert.Equal(t, "must specify connection string or storageAccountName", err.Error()) + + content, err := os.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "azure-queue-storage.yaml")) + assert.Nil(t, err) + + var eventSource *v1alpha1.EventSource + err = yaml.Unmarshal(content, &eventSource) + assert.Nil(t, err) + assert.NotNil(t, eventSource.Spec.AzureQueueStorage) + + for _, value := range eventSource.Spec.AzureQueueStorage { + l := &EventListener{ + AzureQueueStorageEventSource: value, + } + err := l.ValidateEventSource(context.Background()) + assert.NoError(t, err) + } +} diff --git a/eventsources/sources/azureservicebus/start.go b/eventsources/sources/azureservicebus/start.go new file mode 100644 index 0000000000..a7b9de9bef --- /dev/null +++ b/eventsources/sources/azureservicebus/start.go @@ -0,0 +1,195 @@ +package azureservicebus + +import ( + "context" + "encoding/json" + "time" + + "github.com/Azure/azure-sdk-for-go/sdk/azidentity" + servicebus "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus" + "go.uber.org/zap" + + "github.com/argoproj/argo-events/common" + "github.com/argoproj/argo-events/common/logging" + eventsourcecommon "github.com/argoproj/argo-events/eventsources/common" + "github.com/argoproj/argo-events/eventsources/sources" + metrics "github.com/argoproj/argo-events/metrics" + apicommon "github.com/argoproj/argo-events/pkg/apis/common" + "github.com/argoproj/argo-events/pkg/apis/events" + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" +) + +// EventListener implements Eventing for azure service bus event source +type EventListener struct { + EventSourceName string + EventName string + AzureServiceBusEventSource v1alpha1.AzureServiceBusEventSource + Metrics *metrics.Metrics +} + +// GetEventSourceName returns name of event source +func (el *EventListener) GetEventSourceName() string { + return el.EventSourceName +} + +// GetEventName returns name of event +func (el *EventListener) GetEventName() string { + return el.EventName +} + +// GetEventSourceType return type of event server +func (el *EventListener) GetEventSourceType() apicommon.EventSourceType { + return apicommon.AzureServiceBus +} + +type ReceiverType string + +const ( + ReceiverTypeQueue ReceiverType = "queue" + ReceiverTypeSubscription ReceiverType = "subscription" +) + +// StartListening starts listening events +func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error) error { + log := logging.FromContext(ctx).
+ With(logging.LabelEventSourceType, el.GetEventSourceType(), logging.LabelEventName, el.GetEventName()) + + log.Info("started processing the Azure Service Bus event source...") + defer sources.Recover(el.GetEventName()) + + servicebusEventSource := &el.AzureServiceBusEventSource + clientOptions := servicebus.ClientOptions{} + if servicebusEventSource.TLS != nil { + tlsConfig, err := common.GetTLSConfig(servicebusEventSource.TLS) + if err != nil { + log.Errorw("failed to get the tls configuration", zap.Error(err)) + return err + } + clientOptions.TLSConfig = tlsConfig + } + var client *servicebus.Client + if servicebusEventSource.ConnectionString != nil { + log.Info("connecting to the service bus using connection string...") + connStr, err := common.GetSecretFromVolume(servicebusEventSource.ConnectionString) + if err != nil { + log.With("connection-string", servicebusEventSource.ConnectionString.Name).Errorw("failed to retrieve connection string from secret", zap.Error(err)) + return err + } + client, err = servicebus.NewClientFromConnectionString(connStr, &clientOptions) + if err != nil { + log.Errorw("failed to create a service bus client", zap.Error(err)) + return err + } + } else { + log.Info("connecting to azure service bus with AAD credentials...") + cred, err := azidentity.NewDefaultAzureCredential(nil) + if err != nil { + log.Errorw("failed to create DefaultAzureCredential", zap.Error(err)) + return err + } + client, err = servicebus.NewClient(servicebusEventSource.FullyQualifiedNamespace, cred, &clientOptions) + if err != nil { + log.Errorw("failed to create a service bus client", zap.Error(err)) + return err + } + } + + var receiver *servicebus.Receiver + var receiverType ReceiverType + var err error + + if servicebusEventSource.QueueName != "" { + log.Info("creating a queue receiver...") + receiverType = ReceiverTypeQueue + receiver, err = client.NewReceiverForQueue(servicebusEventSource.QueueName, &servicebus.ReceiverOptions{ + ReceiveMode: servicebus.ReceiveModeReceiveAndDelete, + }) + } else { + log.Info("creating a subscription receiver...") + receiverType = ReceiverTypeSubscription + receiver, err = client.NewReceiverForSubscription(servicebusEventSource.TopicName, servicebusEventSource.SubscriptionName, &servicebus.ReceiverOptions{ + ReceiveMode: servicebus.ReceiveModeReceiveAndDelete, + }) + } + if err != nil { + if receiverType == ReceiverTypeQueue { + log.With("queue", servicebusEventSource.QueueName).Errorw("failed to create a queue receiver", zap.Error(err)) + } else { + log.With("topic", servicebusEventSource.TopicName, "subscription", servicebusEventSource.SubscriptionName).Errorw("failed to create a receiver for subscription", zap.Error(err)) + } + return err + } + + if servicebusEventSource.JSONBody { + log.Info("assuming all events have a json body...") + } + + for { + select { + case <-ctx.Done(): + log.Info("context has been cancelled, stopping the Azure Service Bus event source...") + if err := receiver.Close(ctx); err != nil { + log.Errorw("failed to close the receiver", zap.Error(err)) + return err + } + return nil + default: + messages, err := receiver.ReceiveMessages(ctx, 1, nil) + if err != nil { + log.Errorw("failed to receive messages", zap.Error(err)) + continue + } + + for _, message := range messages { + if err := el.handleOne(servicebusEventSource, message, dispatch, log); err != nil { + if receiverType == ReceiverTypeQueue { + log.With("queue", servicebusEventSource.QueueName, "message_id", message.MessageID).Errorw("failed to process Azure 
Service Bus message", zap.Error(err)) + } else { + log.With("topic", servicebusEventSource.TopicName, "subscription", servicebusEventSource.SubscriptionName, "message_id", message.MessageID).Errorw("failed to process Azure Service Bus message", zap.Error(err)) + } + el.Metrics.EventProcessingFailed(el.GetEventSourceName(), el.GetEventName()) + continue + } + } + } + } +} + +func (el *EventListener) handleOne(servicebusEventSource *v1alpha1.AzureServiceBusEventSource, message *servicebus.ReceivedMessage, dispatch func([]byte, ...eventsourcecommon.Option) error, log *zap.SugaredLogger) error { + defer func(start time.Time) { + el.Metrics.EventProcessingDuration(el.GetEventSourceName(), el.GetEventName(), float64(time.Since(start)/time.Millisecond)) + }(time.Now()) + + log.Infow("received the message", zap.Any("message_id", message.MessageID)) + eventData := &events.AzureServiceBusEventData{ + ApplicationProperties: message.ApplicationProperties, + ContentType: message.ContentType, + CorrelationID: message.CorrelationID, + EnqueuedTime: message.EnqueuedTime, + MessageID: message.MessageID, + ReplyTo: message.ReplyTo, + SequenceNumber: message.SequenceNumber, + Subject: message.Subject, + Metadata: servicebusEventSource.Metadata, + } + + if servicebusEventSource.JSONBody { + eventData.Body = (*json.RawMessage)(&message.Body) + } else { + eventData.Body = message.Body + } + + eventBytes, err := json.Marshal(eventData) + if err != nil { + log.With("event_source", el.GetEventSourceName(), "event", el.GetEventName(), "message_id", message.MessageID).Errorw("failed to marshal the event data", zap.Error(err)) + return err + } + + log.Info("dispatching the event to eventbus...") + if err = dispatch(eventBytes); err != nil { + log.With("event_source", el.GetEventSourceName(), "event", el.GetEventName(), "message_id", message.MessageID).Errorw("failed to dispatch the event", zap.Error(err)) + return err + } + + return nil +} diff --git a/eventsources/sources/azureservicebus/validate.go b/eventsources/sources/azureservicebus/validate.go new file mode 100644 index 0000000000..126241c9ec --- /dev/null +++ b/eventsources/sources/azureservicebus/validate.go @@ -0,0 +1,53 @@ +/* +Copyright 2018 BlackRock, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package azureservicebus + +import ( + "context" + "fmt" + + "github.com/argoproj/argo-events/common" + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" +) + +// ValidateEventSource validates azure service bus event source +func (el *EventListener) ValidateEventSource(ctx context.Context) error { + return validate(&el.AzureServiceBusEventSource) +} + +func validate(eventSource *v1alpha1.AzureServiceBusEventSource) error { + if eventSource == nil { + return common.ErrNilEventSource + } + if eventSource.ConnectionString == nil && eventSource.FullyQualifiedNamespace == "" { + return fmt.Errorf("ConnectionString or fullyQualifiedNamespace must be specified") + } + if eventSource.QueueName == "" && (eventSource.TopicName == "" || eventSource.SubscriptionName == "") { + return fmt.Errorf("QueueName or TopicName/SubscriptionName must be specified") + } + if eventSource.QueueName != "" && (eventSource.TopicName != "" || eventSource.SubscriptionName != "") { + return fmt.Errorf("QueueName and TopicName/SubscriptionName cannot be specified at the same time") + } + if eventSource.TopicName == "" && eventSource.SubscriptionName != "" { + return fmt.Errorf("TopicName must be specified when SubscriptionName is specified") + } + if eventSource.TopicName != "" && eventSource.SubscriptionName == "" { + return fmt.Errorf("SubscriptionName must be specified when TopicName is specified") + } + + return nil +} diff --git a/eventsources/sources/azureservicebus/validate_test.go b/eventsources/sources/azureservicebus/validate_test.go new file mode 100644 index 0000000000..6c315914aa --- /dev/null +++ b/eventsources/sources/azureservicebus/validate_test.go @@ -0,0 +1,54 @@ +/* +Copyright 2018 BlackRock, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package azureservicebus + +import ( + "context" + "fmt" + "os" + "testing" + + "github.com/ghodss/yaml" + "github.com/stretchr/testify/assert" + + "github.com/argoproj/argo-events/eventsources/sources" + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" +) + +func TestValidateEventSource(t *testing.T) { + listener := &EventListener{} + + err := listener.ValidateEventSource(context.Background()) + assert.Error(t, err) + assert.Equal(t, "ConnectionString or fullyQualifiedNamespace must be specified", err.Error()) + + content, err := os.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "azure-service-bus.yaml")) + assert.Nil(t, err) + + var eventSource *v1alpha1.EventSource + err = yaml.Unmarshal(content, &eventSource) + assert.Nil(t, err) + assert.NotNil(t, eventSource.Spec.AzureServiceBus) + + for _, value := range eventSource.Spec.AzureServiceBus { + l := &EventListener{ + AzureServiceBusEventSource: value, + } + err := l.ValidateEventSource(context.Background()) + assert.NoError(t, err) + } +} diff --git a/eventsources/sources/bitbucket/basic_auth_strategy.go b/eventsources/sources/bitbucket/basic_auth_strategy.go new file mode 100644 index 0000000000..91a54c4c00 --- /dev/null +++ b/eventsources/sources/bitbucket/basic_auth_strategy.go @@ -0,0 +1,52 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bitbucket + +import ( + "fmt" + + bitbucketv2 "github.com/ktrysmt/go-bitbucket" + corev1 "k8s.io/api/core/v1" + + "github.com/argoproj/argo-events/common" +) + +type BasicAuthStrategy struct { + username string + password string +} + +func NewBasicAuthStrategy(usernameSecret, passwordSecret *corev1.SecretKeySelector) (*BasicAuthStrategy, error) { + username, err := common.GetSecretFromVolume(usernameSecret) + if err != nil { + return nil, fmt.Errorf("failed to retrieve bitbucket username from secret, %w", err) + } + + password, err := common.GetSecretFromVolume(passwordSecret) + if err != nil { + return nil, fmt.Errorf("failed to retrieve bitbucket password from secret, %w", err) + } + + return &BasicAuthStrategy{ + username: username, + password: password, + }, nil +} + +// BitbucketClient implements the AuthStrategy interface. +func (as *BasicAuthStrategy) BitbucketClient() *bitbucketv2.Client { + return bitbucketv2.NewBasicAuth(as.username, as.password) +} diff --git a/eventsources/sources/bitbucket/oauth_token_auth_strategy.go b/eventsources/sources/bitbucket/oauth_token_auth_strategy.go new file mode 100644 index 0000000000..cf5f0881c9 --- /dev/null +++ b/eventsources/sources/bitbucket/oauth_token_auth_strategy.go @@ -0,0 +1,45 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bitbucket + +import ( + "fmt" + + bitbucketv2 "github.com/ktrysmt/go-bitbucket" + corev1 "k8s.io/api/core/v1" + + "github.com/argoproj/argo-events/common" +) + +type OAuthTokenAuthStrategy struct { + token string +} + +func NewOAuthTokenAuthStrategy(oauthTokenSecret *corev1.SecretKeySelector) (*OAuthTokenAuthStrategy, error) { + token, err := common.GetSecretFromVolume(oauthTokenSecret) + if err != nil { + return nil, fmt.Errorf("failed to retrieve bitbucket oauth token from secret, %w", err) + } + + return &OAuthTokenAuthStrategy{ + token: token, + }, nil +} + +// BitbucketClient implements the AuthStrategy interface. +func (as *OAuthTokenAuthStrategy) BitbucketClient() *bitbucketv2.Client { + return bitbucketv2.NewOAuthbearerToken(as.token) +} diff --git a/eventsources/sources/bitbucket/start.go b/eventsources/sources/bitbucket/start.go new file mode 100644 index 0000000000..3a5a2babb7 --- /dev/null +++ b/eventsources/sources/bitbucket/start.go @@ -0,0 +1,328 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bitbucket + +import ( + "context" + "crypto/rand" + "encoding/json" + "errors" + "fmt" + "io" + "math/big" + "net/http" + "time" + + bitbucketv2 "github.com/ktrysmt/go-bitbucket" + "github.com/mitchellh/mapstructure" + "go.uber.org/zap" + + "github.com/argoproj/argo-events/common" + "github.com/argoproj/argo-events/common/logging" + eventsourcecommon "github.com/argoproj/argo-events/eventsources/common" + "github.com/argoproj/argo-events/eventsources/common/webhook" + "github.com/argoproj/argo-events/eventsources/sources" + "github.com/argoproj/argo-events/pkg/apis/events" + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" +) + +// controller controls the webhook operations +var ( + controller = webhook.NewController() +) + +// set up the activation and inactivation channels to control the state of routes. +func init() { + go webhook.ProcessRouteStatus(controller) +} + +// Implement Router +// 1. GetRoute +// 2. HandleRoute +// 3. PostActivate +// 4. 
PostInactivate + +// GetRoute returns the route +func (router *Router) GetRoute() *webhook.Route { + return router.route +} + +// HandleRoute handles incoming requests on the route +func (router *Router) HandleRoute(writer http.ResponseWriter, request *http.Request) { + route := router.GetRoute() + logger := route.Logger.With( + logging.LabelEndpoint, route.Context.Endpoint, + logging.LabelPort, route.Context.Port, + logging.LabelHTTPMethod, route.Context.Method, + ) + + logger.Info("received a request, processing it...") + + if !route.Active { + logger.Info("endpoint is not active, won't process the request") + common.SendErrorResponse(writer, "inactive endpoint") + return + } + + request.Body = http.MaxBytesReader(writer, request.Body, route.Context.GetMaxPayloadSize()) + body, err := io.ReadAll(request.Body) + if err != nil { + logger.Desugar().Error("failed to parse request body", zap.Error(err)) + common.SendErrorResponse(writer, err.Error()) + return + } + + event := &events.BitbucketEventData{ + Headers: request.Header, + Body: (*json.RawMessage)(&body), + Metadata: router.bitbucketEventSource.Metadata, + } + + eventBody, err := json.Marshal(event) + if err != nil { + logger.Info("failed to marshal event") + common.SendErrorResponse(writer, "invalid event") + return + } + + logger.Info("dispatching event on route's data channel") + route.DataCh <- eventBody + + logger.Info("request successfully processed") + common.SendSuccessResponse(writer, "success") +} + +// PostActivate performs operations once the route is activated and ready to consume requests +func (router *Router) PostActivate() error { + return nil +} + +// PostInactivate performs operations after the route is inactivated +func (router *Router) PostInactivate() error { + bitbucketEventSource := router.bitbucketEventSource + logger := router.GetRoute().Logger + + if bitbucketEventSource.DeleteHookOnFinish && len(router.hookIDs) > 0 { + logger.Info("deleting webhooks from bitbucket...") + + for _, repo := range bitbucketEventSource.GetBitbucketRepositories() { + hookID, ok := router.hookIDs[repo.GetRepositoryID()] + if !ok { + return fmt.Errorf("can not find hook ID for repo key: %s", repo.GetRepositoryID()) + } + + if err := router.deleteWebhook(repo, hookID); err != nil { + logger.Errorw("failed to delete webhook", + zap.String("owner", repo.Owner), zap.String("repository-slug", repo.RepositorySlug), zap.Error(err)) + return fmt.Errorf("failed to delete hook for repo %s/%s, %w", repo.Owner, repo.RepositorySlug, err) + } + + logger.Info("successfully deleted hook for repo", + zap.String("owner", repo.Owner), zap.String("repository-slug", repo.RepositorySlug)) + } + } + + return nil +} + +// StartListening starts an event source +func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error) error { + defer sources.Recover(el.GetEventName()) + + bitbucketEventSource := &el.BitbucketEventSource + logger := logging.FromContext(ctx).With( + logging.LabelEventSourceType, el.GetEventSourceType(), + logging.LabelEventName, el.GetEventName(), + ) + + logger.Info("started processing the Bitbucket event source...") + route := webhook.NewRoute(bitbucketEventSource.Webhook, logger, el.GetEventSourceName(), el.GetEventName(), el.Metrics) + router := &Router{ + route: route, + bitbucketEventSource: bitbucketEventSource, + hookIDs: make(map[string]string), + } + + if !bitbucketEventSource.ShouldCreateWebhooks() { + logger.Info("access token or webhook configuration were not 
provided, skipping webhooks creation") + return webhook.ManageRoute(ctx, router, controller, dispatch) + } + + logger.Info("choosing bitbucket auth strategy...") + authStrategy, err := router.chooseAuthStrategy() + if err != nil { + return fmt.Errorf("failed to get bitbucket auth strategy, %w", err) + } + + router.client = authStrategy.BitbucketClient() + + applyWebhooks := func() { + for _, repo := range bitbucketEventSource.GetBitbucketRepositories() { + if err = router.applyBitbucketWebhook(repo); err != nil { + logger.Errorw("failed to apply Bitbucket webhook", + zap.String("owner", repo.Owner), zap.String("repository-slug", repo.RepositorySlug), zap.Error(err)) + continue + } + + time.Sleep(500 * time.Millisecond) + } + } + + // When running multiple replicas of the eventsource, they will all try to create the webhook. + // Randomly sleep some time to mitigate the issue. + randomNum, _ := rand.Int(rand.Reader, big.NewInt(int64(5000))) + time.Sleep(time.Duration(randomNum.Int64()) * time.Millisecond) + applyWebhooks() + + return webhook.ManageRoute(ctx, router, controller, dispatch) +} + +// chooseAuthStrategy returns an AuthStrategy based on the given credentials +func (router *Router) chooseAuthStrategy() (AuthStrategy, error) { + es := router.bitbucketEventSource + switch { + case es.HasBitbucketBasicAuth(): + return NewBasicAuthStrategy(es.Auth.Basic.Username, es.Auth.Basic.Password) + case es.HasBitbucketOAuthToken(): + return NewOAuthTokenAuthStrategy(es.Auth.OAuthToken) + default: + return nil, fmt.Errorf("none of the supported auth options were provided") + } +} + +// applyBitbucketWebhook creates or updates the configured webhook in Bitbucket +func (router *Router) applyBitbucketWebhook(repo v1alpha1.BitbucketRepository) error { + bitbucketEventSource := router.bitbucketEventSource + route := router.route + logger := router.GetRoute().Logger.With( + logging.LabelEndpoint, route.Context.Endpoint, + logging.LabelPort, route.Context.Port, + logging.LabelHTTPMethod, route.Context.Method, + "owner", repo.Owner, + "repository-slug", repo.RepositorySlug, + ) + + formattedWebhookURL := common.FormattedURL(bitbucketEventSource.Webhook.URL, bitbucketEventSource.Webhook.Endpoint) + + logger.Info("listing existing webhooks...") + hooks, err := router.listWebhooks(repo) + if err != nil { + logger.Errorw("failed to list webhooks", zap.Error(err)) + return fmt.Errorf("failed to list webhooks, %w", err) + } + + logger.Info("checking if webhook already exists...") + existingHookSubscription, isFound := router.findWebhook(hooks, formattedWebhookURL) + if isFound { + logger.Info("webhook already exists, removing old webhook...") + if err := router.deleteWebhook(repo, existingHookSubscription.Uuid); err != nil { + logger.Errorw("failed to delete old webhook", + zap.String("owner", repo.Owner), zap.String("repository-slug", repo.RepositorySlug), zap.Error(err)) + return fmt.Errorf("failed to delete old webhook for repo %s/%s, %w", repo.Owner, repo.RepositorySlug, err) + } + } + + logger.Info("creating a new webhook...") + newWebhook, err := router.createWebhook(repo, formattedWebhookURL) + if err != nil { + logger.Errorw("failed to create new webhook", zap.Error(err)) + return fmt.Errorf("failed to create new webhook, %w", err) + } + + router.hookIDs[repo.GetRepositoryID()] = newWebhook.Uuid + + logger.Info("successfully created a new webhook") + return nil +} + +// createWebhook creates a new webhook +func (router *Router) createWebhook(repo v1alpha1.BitbucketRepository, formattedWebhookURL 
string) (*bitbucketv2.Webhook, error) { + opt := &bitbucketv2.WebhooksOptions{ + Owner: repo.Owner, + RepoSlug: repo.RepositorySlug, + Url: formattedWebhookURL, + Description: "webhook managed by Argo-Events", + Active: true, + Events: router.bitbucketEventSource.Events, + } + + return router.client.Repositories.Webhooks.Create(opt) +} + +// deleteWebhook deletes an existing webhook +func (router *Router) deleteWebhook(repo v1alpha1.BitbucketRepository, hookID string) error { + _, err := router.client.Repositories.Webhooks.Delete(&bitbucketv2.WebhooksOptions{ + Owner: repo.Owner, + RepoSlug: repo.RepositorySlug, + Uuid: hookID, + }) + if err != nil { + // Skip not found errors in case the webhook was already deleted + var bitbucketErr *bitbucketv2.UnexpectedResponseStatusError + if errors.As(err, &bitbucketErr) && bitbucketErr.Status == "404 Not Found" { + return nil + } + } + + return err +} + +// listWebhooks gets a list of all existing webhooks in target repository +func (router *Router) listWebhooks(repo v1alpha1.BitbucketRepository) ([]WebhookSubscription, error) { + hooksResponse, err := router.client.Repositories.Webhooks.Gets(&bitbucketv2.WebhooksOptions{ + Owner: repo.Owner, + RepoSlug: repo.RepositorySlug, + }) + if err != nil { + return nil, err + } + + return router.extractHooksFromListResponse(hooksResponse) +} + +// extractHooksFromListResponse is a helper that extracts the list of webhooks from the response of listWebhooks +func (router *Router) extractHooksFromListResponse(listHooksResponse interface{}) ([]WebhookSubscription, error) { + logger := router.GetRoute().Logger + res, ok := listHooksResponse.(map[string]interface{}) + if !ok { + logger.Errorw("failed to parse the list webhooks response", zap.Any("response", listHooksResponse)) + return nil, fmt.Errorf("failed to parse the list webhooks response") + } + + var hooks []WebhookSubscription + err := mapstructure.Decode(res["values"], &hooks) + if err != nil || hooks == nil { + logger.Errorw("failed to parse the list webhooks response", zap.Any("response", listHooksResponse)) + return nil, fmt.Errorf("failed to parse the list webhooks response") + } + + return hooks, nil +} + +// findWebhook searches for a webhook in a list by its URL and returns the webhook if it is found +func (router *Router) findWebhook(hooks []WebhookSubscription, targetWebhookURL string) (*WebhookSubscription, bool) { + var existingHookSubscription *WebhookSubscription + isFound := false + for _, hook := range hooks { + if hook.Url == targetWebhookURL { + isFound = true + existingHookSubscription = &hook + break + } + } + + return existingHookSubscription, isFound +} diff --git a/eventsources/sources/bitbucket/types.go b/eventsources/sources/bitbucket/types.go new file mode 100644 index 0000000000..da50f180db --- /dev/null +++ b/eventsources/sources/bitbucket/types.go @@ -0,0 +1,84 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package bitbucket + +import ( + bitbucketv2 "github.com/ktrysmt/go-bitbucket" + + "github.com/argoproj/argo-events/eventsources/common/webhook" + "github.com/argoproj/argo-events/metrics" + apicommon "github.com/argoproj/argo-events/pkg/apis/common" + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" +) + +// EventListener implements ConfigExecutor +type EventListener struct { + EventSourceName string + EventName string + BitbucketEventSource v1alpha1.BitbucketEventSource + Metrics *metrics.Metrics +} + +// GetEventSourceName returns name of event source +func (el *EventListener) GetEventSourceName() string { + return el.EventSourceName +} + +// GetEventName returns name of event +func (el *EventListener) GetEventName() string { + return el.EventName +} + +// GetEventSourceType returns the type of the event source +func (el *EventListener) GetEventSourceType() apicommon.EventSourceType { + return apicommon.BitbucketEvent +} + +// Router contains the configuration information for a route +type Router struct { + // route contains information about an API endpoint + route *webhook.Route + // client to connect to Bitbucket + client *bitbucketv2.Client + // bitbucketEventSource is the event source that holds information to consume events from Bitbucket + bitbucketEventSource *v1alpha1.BitbucketEventSource + // hookIDs is a map of webhook IDs + // (owner+","+repoSlug) -> hook ID + // Bitbucket API docs: + // https://developer.atlassian.com/cloud/bitbucket/rest/ + hookIDs map[string]string +} + +type WebhookSubscription struct { + // Uuid holds the webhook's ID + Uuid string `json:"uuid"` + // The Url events get delivered to. + Url string `json:"url"` + // Description holds a user-defined description of the webhook. + Description string `json:"description,omitempty"` + // Subject holds metadata about the subject of the webhook (repository, etc.) + Subject map[string]interface{} `json:"subject,omitempty"` + // Active refers to the status of the webhook for event deliveries. + Active bool `json:"active,omitempty"` + // The Events this webhook is subscribed to. + Events []string `json:"events"` +} + +// AuthStrategy is implemented by the different Bitbucket auth strategies that are supported +type AuthStrategy interface { + // BitbucketClient returns a bitbucket client initialized with the specific auth strategy + BitbucketClient() *bitbucketv2.Client +} diff --git a/eventsources/sources/bitbucket/validate.go b/eventsources/sources/bitbucket/validate.go new file mode 100644 index 0000000000..ab48533e41 --- /dev/null +++ b/eventsources/sources/bitbucket/validate.go @@ -0,0 +1,43 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package bitbucket + +import ( + "context" + "fmt" + + "github.com/argoproj/argo-events/common" + "github.com/argoproj/argo-events/eventsources/common/webhook" + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" +) + +// ValidateEventSource validates the bitbucket event source +func (el *EventListener) ValidateEventSource(ctx context.Context) error { + return validate(&el.BitbucketEventSource) +} + +func validate(eventSource *v1alpha1.BitbucketEventSource) error { + if eventSource == nil { + return common.ErrNilEventSource + } + if eventSource.GetBitbucketRepositories() == nil { + return fmt.Errorf("at least one repository is required") + } + if eventSource.ShouldCreateWebhooks() && len(eventSource.Events) == 0 { + return fmt.Errorf("events must be defined to create bitbucket webhooks") + } + return webhook.ValidateWebhookContext(eventSource.Webhook) +} diff --git a/eventsources/sources/bitbucket/validate_test.go b/eventsources/sources/bitbucket/validate_test.go new file mode 100644 index 0000000000..a57f00fa54 --- /dev/null +++ b/eventsources/sources/bitbucket/validate_test.go @@ -0,0 +1,54 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bitbucket + +import ( + "context" + "fmt" + "os" + "testing" + + "github.com/ghodss/yaml" + "github.com/stretchr/testify/assert" + + "github.com/argoproj/argo-events/eventsources/sources" + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" +) + +func TestValidateEventSource(t *testing.T) { + listener := &EventListener{} + + err := listener.ValidateEventSource(context.Background()) + assert.Error(t, err) + assert.Equal(t, "at least one repository is required", err.Error()) + + content, err := os.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "bitbucket.yaml")) + assert.Nil(t, err) + + var eventSource *v1alpha1.EventSource + err = yaml.Unmarshal(content, &eventSource) + assert.Nil(t, err) + assert.NotNil(t, eventSource.Spec.Bitbucket) + + for name, value := range eventSource.Spec.Bitbucket { + fmt.Println(name) + l := &EventListener{ + BitbucketEventSource: value, + } + err := l.ValidateEventSource(context.Background()) + assert.NoError(t, err) + } +} diff --git a/eventsources/sources/bitbucketserver/start.go b/eventsources/sources/bitbucketserver/start.go new file mode 100644 index 0000000000..39a659e030 --- /dev/null +++ b/eventsources/sources/bitbucketserver/start.go @@ -0,0 +1,711 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package bitbucketserver + +import ( + "context" + "crypto/hmac" + "crypto/rand" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "math/big" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + bitbucketv1 "github.com/gfleury/go-bitbucket-v1" + "github.com/mitchellh/mapstructure" + "golang.org/x/exp/slices" + + "github.com/argoproj/argo-events/common" + "github.com/argoproj/argo-events/common/logging" + eventsourcecommon "github.com/argoproj/argo-events/eventsources/common" + "github.com/argoproj/argo-events/eventsources/common/webhook" + "github.com/argoproj/argo-events/eventsources/sources" + "github.com/argoproj/argo-events/pkg/apis/events" + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" + "go.uber.org/zap" +) + +// controller controls the webhook operations +var ( + controller = webhook.NewController() +) + +// set up the activation and inactivation channels to control the state of routes. +func init() { + go webhook.ProcessRouteStatus(controller) +} + +// Implement Router +// 1. GetRoute +// 2. HandleRoute +// 3. PostActivate +// 4. PostInactivate + +// GetRoute returns the route +func (router *Router) GetRoute() *webhook.Route { + return router.route +} + +// HandleRoute handles incoming requests on the route +func (router *Router) HandleRoute(writer http.ResponseWriter, request *http.Request) { + bitbucketserverEventSource := router.bitbucketserverEventSource + route := router.GetRoute() + + logger := route.Logger.With( + logging.LabelEndpoint, route.Context.Endpoint, + logging.LabelPort, route.Context.Port, + logging.LabelHTTPMethod, route.Context.Method, + ) + + logger.Info("received a request, processing it...") + + if !route.Active { + logger.Info("endpoint is not active, won't process the request") + common.SendErrorResponse(writer, "inactive endpoint") + return + } + + defer func(start time.Time) { + route.Metrics.EventProcessingDuration(route.EventSourceName, route.EventName, float64(time.Since(start)/time.Millisecond)) + }(time.Now()) + + request.Body = http.MaxBytesReader(writer, request.Body, route.Context.GetMaxPayloadSize()) + body, err := router.parseAndValidateBitbucketServerRequest(request) + if err != nil { + logger.Errorw("failed to parse/validate request", zap.Error(err)) + common.SendErrorResponse(writer, err.Error()) + route.Metrics.EventProcessingFailed(route.EventSourceName, route.EventName) + return + } + + // When SkipBranchRefsChangedOnOpenPR is enabled and the webhook event type is repo:refs_changed, + // check if a Pull Request is open for the commit; if one is, the event will be skipped. + if bitbucketserverEventSource.SkipBranchRefsChangedOnOpenPR && slices.Contains(bitbucketserverEventSource.Events, "repo:refs_changed") { + refsChanged := refsChangedWebhookEvent{} + err := json.Unmarshal(body, &refsChanged) + if err != nil { + logger.Errorw("failed to read webhook body", zap.Error(err)) + common.SendErrorResponse(writer, err.Error()) + route.Metrics.EventProcessingFailed(route.EventSourceName, route.EventName) + return + } + + if refsChanged.EventKey == "repo:refs_changed" && + len(refsChanged.Changes) > 0 && // Note refsChanged.Changes never has more or less than one change, not sure why Atlassian made it a list. + strings.EqualFold(refsChanged.Changes[0].Ref.Type, "BRANCH") && + !strings.EqualFold(refsChanged.Changes[0].Type, "DELETE") { + // Check if commit is associated with an open PR.
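// For orientation, a repo:refs_changed payload that passes the guard above looks roughly like
// the following (an illustrative sketch trimmed to the fields this handler actually reads;
// names and hashes are made up, and the real Bitbucket Server schema carries more fields):
//
//	{
//	  "eventKey": "repo:refs_changed",
//	  "repository": {"slug": "my-repo", "project": {"key": "PROJ"}},
//	  "changes": [{"ref": {"type": "BRANCH"}, "toHash": "0a1b2c3d", "type": "UPDATE"}]
//	}
//
// Tag updates and branch deletions fail the guard, so they skip the open-PR lookup and are
// published as usual.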
+ hasOpenPR, err := router.refsChangedHasOpenPullRequest(refsChanged.Repository.Project.Key, refsChanged.Repository.Slug, refsChanged.Changes[0].ToHash) + if err != nil { + logger.Errorw("failed to check whether the changed branch ref has an open pull request", zap.Error(err)) + common.SendErrorResponse(writer, err.Error()) + route.Metrics.EventProcessingFailed(route.EventSourceName, route.EventName) + return + } + + // Do not publish this Branch repo:refs_changed event if a related Pull Request is already opened for the commit. + if hasOpenPR { + logger.Info("skipping publishing event, commit has an open pull request") + common.SendSuccessResponse(writer, "success") + return + } + } + } + + event := &events.BitbucketServerEventData{ + Headers: request.Header, + Body: (*json.RawMessage)(&body), + Metadata: router.bitbucketserverEventSource.Metadata, + } + + eventBody, err := json.Marshal(event) + if err != nil { + logger.Errorw("failed to marshal event", zap.Error(err)) + common.SendErrorResponse(writer, "invalid event") + route.Metrics.EventProcessingFailed(route.EventSourceName, route.EventName) + return + } + + logger.Info("dispatching event on route's data channel") + route.DataCh <- eventBody + + logger.Info("request successfully processed") + common.SendSuccessResponse(writer, "success") +} + +// PostActivate performs operations once the route is activated and ready to consume requests +func (router *Router) PostActivate() error { + return nil +} + +// PostInactivate performs operations after the route is inactivated +func (router *Router) PostInactivate() error { + bitbucketserverEventSource := router.bitbucketserverEventSource + route := router.route + logger := route.Logger + + if !bitbucketserverEventSource.DeleteHookOnFinish { + logger.Info("not configured to delete webhooks, skipping") + return nil + } + + if len(router.hookIDs) == 0 { + logger.Info("no need to delete webhooks, skipping") + return nil + } + + logger.Info("deleting webhooks from bitbucket") + + bitbucketRepositories := bitbucketserverEventSource.GetBitbucketServerRepositories() + + if len(bitbucketserverEventSource.Projects) > 0 { + bitbucketProjectRepositories, err := getProjectRepositories(router.deleteClient, bitbucketserverEventSource.Projects) + if err != nil { + return err + } + + bitbucketRepositories = append(bitbucketRepositories, bitbucketProjectRepositories...) + } + + for _, repo := range bitbucketRepositories { + id, ok := router.hookIDs[repo.ProjectKey+","+repo.RepositorySlug] + if !ok { + return fmt.Errorf("cannot find hook ID for project-key: %s, repository-slug: %s", repo.ProjectKey, repo.RepositorySlug) + } + + _, err := router.deleteClient.DefaultApi.DeleteWebhook(repo.ProjectKey, repo.RepositorySlug, int32(id)) + if err != nil { + return fmt.Errorf("failed to delete bitbucketserver webhook.
err: %w", err) + } + + logger.Infow("bitbucket server webhook deleted", + zap.String("project-key", repo.ProjectKey), zap.String("repository-slug", repo.RepositorySlug)) + } + + return nil +} + +// StartListening starts an event source +func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error) error { + defer sources.Recover(el.GetEventName()) + + bitbucketserverEventSource := &el.BitbucketServerEventSource + + logger := logging.FromContext(ctx).With( + logging.LabelEventSourceType, el.GetEventSourceType(), + logging.LabelEventName, el.GetEventName(), + "base-url", bitbucketserverEventSource.BitbucketServerBaseURL, + ) + + logger.Info("started processing the Bitbucket Server event source...") + + logger.Info("retrieving the access token credentials...") + bitbucketToken, err := common.GetSecretFromVolume(bitbucketserverEventSource.AccessToken) + if err != nil { + return fmt.Errorf("getting bitbucketserver token. err: %w", err) + } + + logger.Info("setting up the client to connect to Bitbucket Server...") + bitbucketConfig, err := newBitbucketServerClientCfg(bitbucketserverEventSource) + if err != nil { + return fmt.Errorf("initializing bitbucketserver client config. err: %w", err) + } + + bitbucketURL, err := url.Parse(bitbucketserverEventSource.BitbucketServerBaseURL) + if err != nil { + return fmt.Errorf("parsing bitbucketserver url. err: %w", err) + } + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + bitbucketClient := newBitbucketServerClient(ctx, bitbucketConfig, bitbucketToken) + bitbucketDeleteClient := newBitbucketServerClient(context.Background(), bitbucketConfig, bitbucketToken) + + route := webhook.NewRoute(bitbucketserverEventSource.Webhook, logger, el.GetEventSourceName(), el.GetEventName(), el.Metrics) + router := &Router{ + route: route, + client: bitbucketClient, + customClient: &customBitbucketClient{ + client: bitbucketConfig.HTTPClient, + ctx: ctx, + token: bitbucketToken, + url: bitbucketURL, + }, + deleteClient: bitbucketDeleteClient, + bitbucketserverEventSource: bitbucketserverEventSource, + hookIDs: make(map[string]int), + } + + if !bitbucketserverEventSource.ShouldCreateWebhooks() { + logger.Info("access token or webhook configuration were not provided, skipping webhooks creation") + return webhook.ManageRoute(ctx, router, controller, dispatch) + } + + if bitbucketserverEventSource.WebhookSecret != nil { + logger.Info("retrieving the webhook secret...") + webhookSecret, err := common.GetSecretFromVolume(bitbucketserverEventSource.WebhookSecret) + if err != nil { + return fmt.Errorf("getting bitbucketserver webhook secret. err: %w", err) + } + + router.hookSecret = webhookSecret + } + + applyWebhooks := func() { + bitbucketRepositories := bitbucketserverEventSource.GetBitbucketServerRepositories() + + if len(bitbucketserverEventSource.Projects) > 0 { + bitbucketProjectRepositories, err := getProjectRepositories(router.client, bitbucketserverEventSource.Projects) + if err != nil { + logger.Errorw("failed to apply Bitbucket webhook", zap.Error(err)) + } + + bitbucketRepositories = append(bitbucketRepositories, bitbucketProjectRepositories...) 
+ } + + for _, repo := range bitbucketRepositories { + if err = router.applyBitbucketServerWebhook(repo); err != nil { + logger.Errorw("failed to apply Bitbucket webhook", + zap.String("project-key", repo.ProjectKey), zap.String("repository-slug", repo.RepositorySlug), zap.Error(err)) + continue + } + + time.Sleep(500 * time.Millisecond) + } + } + + // When running multiple replicas of this event source, they will try to create webhooks at the same time. + // Randomly delay running the initial apply webhooks func to mitigate the issue. + randomNum, _ := rand.Int(rand.Reader, big.NewInt(int64(2000))) + time.Sleep(time.Duration(randomNum.Int64()) * time.Millisecond) + applyWebhooks() + + var checkInterval time.Duration + if bitbucketserverEventSource.CheckInterval == "" { + checkInterval = 60 * time.Second + } else { + checkInterval, err = time.ParseDuration(bitbucketserverEventSource.CheckInterval) + if err != nil { + return err + } + } + + go func() { + // Another kind of race conditions might happen when pods do rolling upgrade - new pod starts + // and old pod terminates, if DeleteHookOnFinish is true, the hook will be deleted from Bitbucket. + // This is a workaround to mitigate the race conditions. + logger.Info("starting bitbucket hooks manager daemon") + + ticker := time.NewTicker(checkInterval) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + logger.Info("exiting bitbucket hooks manager daemon") + return + case <-ticker.C: + applyWebhooks() + } + } + }() + + return webhook.ManageRoute(ctx, router, controller, dispatch) +} + +// applyBitbucketServerWebhook creates or updates the configured webhook in Bitbucket +func (router *Router) applyBitbucketServerWebhook(repo v1alpha1.BitbucketServerRepository) error { + bitbucketserverEventSource := router.bitbucketserverEventSource + route := router.route + + logger := route.Logger.With( + logging.LabelEndpoint, route.Context.Endpoint, + logging.LabelPort, route.Context.Port, + logging.LabelHTTPMethod, route.Context.Method, + "project-key", repo.ProjectKey, + "repository-slug", repo.RepositorySlug, + "base-url", bitbucketserverEventSource.BitbucketServerBaseURL, + ) + + formattedURL := common.FormattedURL(bitbucketserverEventSource.Webhook.URL, bitbucketserverEventSource.Webhook.Endpoint) + + hooks, err := router.listWebhooks(repo) + if err != nil { + return fmt.Errorf("failed to list existing hooks to check for duplicates for repository %s/%s, %w", repo.ProjectKey, repo.RepositorySlug, err) + } + + var existingHook bitbucketv1.Webhook + isAlreadyExists := false + + for _, hook := range hooks { + if hook.Url == formattedURL { + isAlreadyExists = true + existingHook = hook + router.hookIDs[repo.ProjectKey+","+repo.RepositorySlug] = hook.ID + break + } + } + + newHook := bitbucketv1.Webhook{ + Name: "Argo Events", + Url: formattedURL, + Active: true, + Events: bitbucketserverEventSource.Events, + Configuration: bitbucketv1.WebhookConfiguration{Secret: router.hookSecret}, + } + + requestBody, err := router.createRequestBodyFromWebhook(newHook) + if err != nil { + return fmt.Errorf("failed to create request body from webhook, %w", err) + } + + // Update the webhook when it does exist and the events/configuration have changed + if isAlreadyExists { + logger.Info("webhook already exists") + if router.shouldUpdateWebhook(existingHook, newHook) { + logger.Info("webhook requires an update") + err = router.updateWebhook(existingHook.ID, requestBody, repo) + if err != nil { + return fmt.Errorf("failed to update webhook. 
err: %w", err) + } + + logger.With("hook-id", existingHook.ID).Info("hook successfully updated") + } + + return nil + } + + // Create the webhook when it doesn't exist yet + createdHook, err := router.createWebhook(requestBody, repo) + if err != nil { + return fmt.Errorf("failed to create webhook. err: %w", err) + } + + router.hookIDs[repo.ProjectKey+","+repo.RepositorySlug] = createdHook.ID + + logger.With("hook-id", createdHook.ID).Info("hook successfully registered") + + return nil +} + +func (router *Router) listWebhooks(repo v1alpha1.BitbucketServerRepository) ([]bitbucketv1.Webhook, error) { + apiResponse, err := router.client.DefaultApi.FindWebhooks(repo.ProjectKey, repo.RepositorySlug, nil) + if err != nil { + return nil, fmt.Errorf("failed to list existing hooks to check for duplicates for repository %s/%s, %w", repo.ProjectKey, repo.RepositorySlug, err) + } + + hooks, err := bitbucketv1.GetWebhooksResponse(apiResponse) + if err != nil { + return nil, fmt.Errorf("failed to convert the list of webhooks for repository %s/%s, %w", repo.ProjectKey, repo.RepositorySlug, err) + } + + return hooks, nil +} + +func (router *Router) createWebhook(requestBody []byte, repo v1alpha1.BitbucketServerRepository) (*bitbucketv1.Webhook, error) { + apiResponse, err := router.client.DefaultApi.CreateWebhook(repo.ProjectKey, repo.RepositorySlug, requestBody, []string{"application/json"}) + if err != nil { + return nil, fmt.Errorf("failed to add webhook. err: %w", err) + } + + var createdHook *bitbucketv1.Webhook + err = mapstructure.Decode(apiResponse.Values, &createdHook) + if err != nil { + return nil, fmt.Errorf("failed to convert API response to Webhook struct. err: %w", err) + } + + return createdHook, nil +} + +func (router *Router) updateWebhook(hookID int, requestBody []byte, repo v1alpha1.BitbucketServerRepository) error { + _, err := router.client.DefaultApi.UpdateWebhook(repo.ProjectKey, repo.RepositorySlug, int32(hookID), requestBody, []string{"application/json"}) + + return err +} + +func (router *Router) shouldUpdateWebhook(existingHook bitbucketv1.Webhook, newHook bitbucketv1.Webhook) bool { + return !common.ElementsMatch(existingHook.Events, newHook.Events) || + existingHook.Configuration.Secret != newHook.Configuration.Secret +} + +func (router *Router) createRequestBodyFromWebhook(hook bitbucketv1.Webhook) ([]byte, error) { + var err error + var finalHook interface{} = hook + + // if the hook doesn't have a secret, the configuration field must be removed in order for the request to succeed, + // otherwise Bitbucket Server sends 500 response because of empty string value in the hook.Configuration.Secret field + if hook.Configuration.Secret == "" { + hookMap := make(map[string]interface{}) + err = common.StructToMap(hook, hookMap) + if err != nil { + return nil, fmt.Errorf("failed to convert webhook to map, %w", err) + } + + delete(hookMap, "configuration") + + finalHook = hookMap + } + + requestBody, err := json.Marshal(finalHook) + if err != nil { + return nil, fmt.Errorf("failed to marshal new webhook to JSON, %w", err) + } + + return requestBody, nil +} + +func (router *Router) parseAndValidateBitbucketServerRequest(request *http.Request) ([]byte, error) { + body, err := io.ReadAll(request.Body) + if err != nil { + return nil, fmt.Errorf("failed to parse request body, %w", err) + } + + if len(router.hookSecret) != 0 { + signature := request.Header.Get("X-Hub-Signature") + if len(signature) == 0 { + return nil, fmt.Errorf("missing signature header") + } + + mac := 
hmac.New(sha256.New, []byte(router.hookSecret)) + _, _ = mac.Write(body) + expectedMAC := hex.EncodeToString(mac.Sum(nil)) + + // The header is expected to look like "sha256=<hex digest>"; trimming the prefix + // (rather than slicing a fixed width) avoids a panic on malformed short headers. + if !hmac.Equal([]byte(strings.TrimPrefix(signature, "sha256=")), []byte(expectedMAC)) { + return nil, fmt.Errorf("hmac verification failed") + } + } + + return body, nil +} + +// refsChangedHasOpenPullRequest returns true if the changed commit has an open pull request +func (router *Router) refsChangedHasOpenPullRequest(project, repository, commit string) (bool, error) { + bitbucketPullRequests, err := router.customClient.GetCommitPullRequests(project, repository, commit) + if err != nil { + return false, fmt.Errorf("getting commit pull requests for project %s, repository %s and commit %s: %w", + project, repository, commit, err) + } + + for _, bitbucketPullRequest := range bitbucketPullRequests { + if strings.EqualFold(bitbucketPullRequest.State, "OPEN") { + return true, nil + } + } + + return false, nil +} + +type bitbucketServerReposPager struct { + Size int `json:"size"` + Limit int `json:"limit"` + Start int `json:"start"` + NextPageStart int `json:"nextPageStart"` + IsLastPage bool `json:"isLastPage"` + Values []bitbucketv1.Repository `json:"values"` +} + +// getProjectRepositories returns all the Bitbucket Server repositories in the provided projects +func getProjectRepositories(client *bitbucketv1.APIClient, projects []string) ([]v1alpha1.BitbucketServerRepository, error) { + var bitbucketRepos []bitbucketv1.Repository + for _, project := range projects { + paginationOptions := map[string]interface{}{"start": 0, "limit": 500} + for { + response, err := client.DefaultApi.GetRepositoriesWithOptions(project, paginationOptions) + if err != nil { + return nil, fmt.Errorf("unable to list repositories for project %s: %w", project, err) + } + + var reposPager bitbucketServerReposPager + err = mapstructure.Decode(response.Values, &reposPager) + if err != nil { + return nil, fmt.Errorf("unable to decode repositories for project %s: %w", project, err) + } + + bitbucketRepos = append(bitbucketRepos, reposPager.Values...) + + if reposPager.IsLastPage { + break + } + + paginationOptions["start"] = reposPager.NextPageStart + } + } + + var repositories []v1alpha1.BitbucketServerRepository + for n := range bitbucketRepos { + repositories = append(repositories, v1alpha1.BitbucketServerRepository{ + ProjectKey: bitbucketRepos[n].Project.Key, + RepositorySlug: bitbucketRepos[n].Slug, + }) + } + + return repositories, nil +} + +func newBitbucketServerClientCfg(bitbucketserverEventSource *v1alpha1.BitbucketServerEventSource) (*bitbucketv1.Configuration, error) { + bitbucketCfg := bitbucketv1.NewConfiguration(bitbucketserverEventSource.BitbucketServerBaseURL) + bitbucketCfg.AddDefaultHeader("x-atlassian-token", "no-check") + bitbucketCfg.AddDefaultHeader("x-requested-with", "XMLHttpRequest") + bitbucketCfg.HTTPClient = &http.Client{} + + if bitbucketserverEventSource.TLS != nil { + tlsConfig, err := common.GetTLSConfig(bitbucketserverEventSource.TLS) + if err != nil { + return nil, fmt.Errorf("failed to get the tls configuration.
err: %w", err) + } + + bitbucketCfg.HTTPClient.Transport = &http.Transport{ + TLSClientConfig: tlsConfig, + } + } + + return bitbucketCfg, nil +} + +func newBitbucketServerClient(ctx context.Context, bitbucketConfig *bitbucketv1.Configuration, bitbucketToken string) *bitbucketv1.APIClient { + ctx = context.WithValue(ctx, bitbucketv1.ContextAccessToken, bitbucketToken) + return bitbucketv1.NewAPIClient(ctx, bitbucketConfig) +} + +type refsChangedWebhookEvent struct { + EventKey string `json:"eventKey"` + Repository struct { + Slug string `json:"slug"` + Project struct { + Key string `json:"key"` + } `json:"project"` + } `json:"repository"` + Changes []struct { + Ref struct { + Type string `json:"type"` + } `json:"ref"` + ToHash string `json:"toHash"` + Type string `json:"type"` + } `json:"changes"` +} + +// customBitbucketClient returns a Bitbucket HTTP client that implements methods that gfleury/go-bitbucket-v1 does not. +// Specifically getting Pull Requests associated to a commit is not supported by gfleury/go-bitbucket-v1. +type customBitbucketClient struct { + client *http.Client + ctx context.Context + token string + url *url.URL +} + +type pagination struct { + Start int + Limit int +} + +func (p *pagination) StartStr() string { + return strconv.Itoa(p.Start) +} + +func (p *pagination) LimitStr() string { + return strconv.Itoa(p.Limit) +} + +func (c *customBitbucketClient) authHeader(req *http.Request) { + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.token)) +} + +func (c *customBitbucketClient) get(u string) ([]byte, error) { + req, err := http.NewRequestWithContext(c.ctx, http.MethodGet, u, nil) + if err != nil { + return nil, err + } + + c.authHeader(req) + + res, err := c.client.Do(req) + if err != nil { + return nil, err + } + defer func() { + _ = res.Body.Close() + }() + + if res.StatusCode > 299 { + resBody, readErr := io.ReadAll(res.Body) + if readErr != nil { + return nil, readErr + } + + return nil, fmt.Errorf("calling endpoint '%s' failed: status %d: response body: %s", u, res.StatusCode, resBody) + } + + return io.ReadAll(res.Body) +} + +// pullRequestRes is a struct containing information about the Pull Request. +type pullRequestRes struct { + ID int `json:"id"` + State string `json:"state"` +} + +// pagedPullRequestsRes is a paged response with values of pullRequestRes. +type pagedPullRequestsRes struct { + Size int `json:"size"` + Limit int `json:"limit"` + IsLastPage bool `json:"isLastPage"` + Values []pullRequestRes `json:"values"` + Start int `json:"start"` + NextPageStart int `json:"nextPageStart"` +} + +// GetCommitPullRequests returns all the Pull Requests associated to the commit id. +func (c *customBitbucketClient) GetCommitPullRequests(project, repository, commit string) ([]pullRequestRes, error) { + p := pagination{Start: 0, Limit: 500} + + commitsURL := c.url.JoinPath(fmt.Sprintf("api/1.0/projects/%s/repos/%s/commits/%s/pull-requests", project, repository, commit)) + query := commitsURL.Query() + query.Set("limit", p.LimitStr()) + + var pullRequests []pullRequestRes + for { + query.Set("start", p.StartStr()) + commitsURL.RawQuery = query.Encode() + + body, err := c.get(commitsURL.String()) + if err != nil { + return nil, err + } + + var pagedPullRequests pagedPullRequestsRes + err = json.Unmarshal(body, &pagedPullRequests) + if err != nil { + return nil, err + } + + pullRequests = append(pullRequests, pagedPullRequests.Values...) 
+ + if pagedPullRequests.IsLastPage { + break + } + + p.Start = pagedPullRequests.NextPageStart + } + + return pullRequests, nil +} diff --git a/eventsources/sources/bitbucketserver/types.go b/eventsources/sources/bitbucketserver/types.go new file mode 100644 index 0000000000..45ad1d8af3 --- /dev/null +++ b/eventsources/sources/bitbucketserver/types.go @@ -0,0 +1,68 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bitbucketserver + +import ( + "github.com/argoproj/argo-events/eventsources/common/webhook" + metrics "github.com/argoproj/argo-events/metrics" + apicommon "github.com/argoproj/argo-events/pkg/apis/common" + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" + bitbucketv1 "github.com/gfleury/go-bitbucket-v1" +) + +// EventListener implements ConfigExecutor +type EventListener struct { + EventSourceName string + EventName string + BitbucketServerEventSource v1alpha1.BitbucketServerEventSource + Metrics *metrics.Metrics +} + +// GetEventSourceName returns name of event source +func (el *EventListener) GetEventSourceName() string { + return el.EventSourceName +} + +// GetEventName returns name of event +func (el *EventListener) GetEventName() string { + return el.EventName +} + +// GetEventSourceType returns the type of the event source +func (el *EventListener) GetEventSourceType() apicommon.EventSourceType { + return apicommon.BitbucketServerEvent +} + +// Router contains the configuration information for a route +type Router struct { + // route contains information about an API endpoint + route *webhook.Route + // client is the bitbucket server client + client *bitbucketv1.APIClient + // customClient is a custom bitbucket server client which implements a method the gfleury/go-bitbucket-v1 client is missing + customClient *customBitbucketClient + // deleteClient is used to delete webhooks. This client does not contain the cancelable context of the default client + deleteClient *bitbucketv1.APIClient + // hookIDs is a map of webhook IDs + // (projectKey + "," + repoSlug) -> hook ID + // Bitbucket Server API docs: + // https://developer.atlassian.com/server/bitbucket/reference/rest-api/ + hookIDs map[string]int + // hookSecret is a Bitbucket Server webhook secret + hookSecret string + // bitbucketserverEventSource is the event source that contains configuration necessary to consume events from Bitbucket Server + bitbucketserverEventSource *v1alpha1.BitbucketServerEventSource +} diff --git a/eventsources/sources/bitbucketserver/validate.go b/eventsources/sources/bitbucketserver/validate.go new file mode 100644 index 0000000000..2e1187a131 --- /dev/null +++ b/eventsources/sources/bitbucketserver/validate.go @@ -0,0 +1,52 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bitbucketserver + +import ( + "context" + "fmt" + "time" + + "github.com/argoproj/argo-events/common" + "github.com/argoproj/argo-events/eventsources/common/webhook" + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" +) + +// ValidateEventSource validates bitbucketserver event source +func (listener *EventListener) ValidateEventSource(ctx context.Context) error { + return validate(&listener.BitbucketServerEventSource) +} + +func validate(eventSource *v1alpha1.BitbucketServerEventSource) error { + if eventSource == nil { + return common.ErrNilEventSource + } + if eventSource.GetBitbucketServerRepositories() == nil && len(eventSource.Projects) == 0 { + return fmt.Errorf("at least one project or repository configuration is required") + } + if eventSource.ShouldCreateWebhooks() && len(eventSource.Events) == 0 { + return fmt.Errorf("events must be defined to create a bitbucket server webhook") + } + if eventSource.BitbucketServerBaseURL == "" { + return fmt.Errorf("bitbucket server base url can't be empty") + } + if eventSource.CheckInterval != "" { + if _, err := time.ParseDuration(eventSource.CheckInterval); err != nil { + return fmt.Errorf("failed to parse webhook check interval duration: %w", err) + } + } + return webhook.ValidateWebhookContext(eventSource.Webhook) +} diff --git a/eventsources/sources/bitbucketserver/validate_test.go b/eventsources/sources/bitbucketserver/validate_test.go new file mode 100644 index 0000000000..e767d6685d --- /dev/null +++ b/eventsources/sources/bitbucketserver/validate_test.go @@ -0,0 +1,54 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package bitbucketserver + +import ( + "context" + "fmt" + "os" + "testing" + + "github.com/ghodss/yaml" + "github.com/stretchr/testify/assert" + + "github.com/argoproj/argo-events/eventsources/sources" + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" +) + +func TestValidateEventSource(t *testing.T) { + listener := &EventListener{} + + err := listener.ValidateEventSource(context.Background()) + assert.Error(t, err) + assert.Equal(t, "at least one project or repository configuration is required", err.Error()) + + content, err := os.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "bitbucketserver.yaml")) + assert.Nil(t, err) + + var eventSource *v1alpha1.EventSource + err = yaml.Unmarshal(content, &eventSource) + assert.Nil(t, err) + assert.NotNil(t, eventSource.Spec.BitbucketServer) + + for name, value := range eventSource.Spec.BitbucketServer { + fmt.Println(name) + l := &EventListener{ + BitbucketServerEventSource: value, + } + err := l.ValidateEventSource(context.Background()) + assert.NoError(t, err) + } +} diff --git a/eventsources/sources/calendar/start.go b/eventsources/sources/calendar/start.go index 088a3d4566..2b4a0db1bb 100644 --- a/eventsources/sources/calendar/start.go +++ b/eventsources/sources/calendar/start.go @@ -24,13 +24,13 @@ import ( "strings" "time" - "github.com/pkg/errors" - cronlib "github.com/robfig/cron" + cronlib "github.com/robfig/cron/v3" "go.uber.org/zap" "k8s.io/client-go/kubernetes" "github.com/argoproj/argo-events/common" "github.com/argoproj/argo-events/common/logging" + eventsourcecommon "github.com/argoproj/argo-events/eventsources/common" "github.com/argoproj/argo-events/eventsources/persist" metrics "github.com/argoproj/argo-events/metrics" apicommon "github.com/argoproj/argo-events/pkg/apis/common" @@ -74,11 +74,11 @@ func (el *EventListener) initializePersistence(ctx context.Context, persistence restConfig, err := common.GetClientConfig(kubeConfig) if err != nil { - return errors.Wrapf(err, "failed to get a K8s rest config for the event source %s", el.GetEventName()) + return fmt.Errorf("failed to get a K8s rest config for the event source %s, %w", el.GetEventName(), err) } kubeClientset, err := kubernetes.NewForConfig(restConfig) if err != nil { - return errors.Wrapf(err, "failed to set up a K8s client for the event source %s", el.GetEventName()) + return fmt.Errorf("failed to set up a K8s client for the event source %s, %w", el.GetEventName(), err) } el.eventPersistence, err = persist.NewConfigMapPersist(ctx, kubeClientset, persistence.ConfigMap, el.Namespace) @@ -100,20 +100,20 @@ func (el *EventListener) getExecutionTime() (time.Time, error) { lastEvent, err := el.eventPersistence.Get(el.getPersistenceKey()) if err != nil { el.log.Errorw("failed to get last persisted event.", zap.Error(err)) - return lastT, errors.Wrap(err, "failed to get last persisted event.") + return lastT, fmt.Errorf("failed to get last persisted event, %w", err) } if lastEvent != nil && lastEvent.EventPayload != "" { var eventData events.CalendarEventData err := json.Unmarshal([]byte(lastEvent.EventPayload), &eventData) if err != nil { el.log.Errorw("failed to marshal last persisted event.", zap.Error(err)) - return lastT, errors.Wrap(err, "failed to marshal last persisted event.") + return lastT, fmt.Errorf("failed to marshal last persisted event, %w", err) } eventTime := strings.Split(eventData.EventTime, " m=") lastT, err = time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", eventTime[0]) if err != nil { el.log.Errorw("failed to parse
the persisted last event timestamp", zap.Error(err)) - return lastT, errors.Wrap(err, "failed to parse the persisted last event timestamp.") + return lastT, fmt.Errorf("failed to parse the persisted last event timestamp, %w", err) } } @@ -134,7 +134,7 @@ func (el *EventListener) getExecutionTime() (time.Time, error) { } // StartListening starts listening events -func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte) error) error { +func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error) error { el.log = logging.FromContext(ctx). With(logging.LabelEventSourceType, el.GetEventSourceType(), logging.LabelEventName, el.GetEventName()) el.log.Info("started processing the calendar event source...") @@ -186,7 +186,7 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt el.log.Infow("loading location for the schedule...", zap.Any("location", calendarEventSource.Timezone)) location, err = time.LoadLocation(calendarEventSource.Timezone) if err != nil { - return errors.Wrapf(err, "failed to load location for event source %s / %s", el.GetEventSourceName(), el.GetEventName()) + return fmt.Errorf("failed to load location for event source %s / %s, %w", el.GetEventSourceName(), el.GetEventName(), err) } lastT = lastT.In(location) } @@ -196,21 +196,20 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt }(time.Now()) eventData := &events.CalendarEventData{ - EventTime: tx.String(), - UserPayload: calendarEventSource.UserPayload, - Metadata: calendarEventSource.Metadata, + EventTime: tx.String(), + Metadata: calendarEventSource.Metadata, } payload, err := json.Marshal(eventData) if err != nil { el.log.Errorw("failed to marshal the event data", zap.Error(err)) // no need to continue as further event payloads will suffer same fate as this one. - return errors.Wrapf(err, "failed to marshal the event data for event source %s / %s", el.GetEventSourceName(), el.GetEventName()) + return fmt.Errorf("failed to marshal the event data for event source %s / %s, %w", el.GetEventSourceName(), el.GetEventName(), err) } el.log.Info("dispatching calendar event...") err = dispatch(payload) if err != nil { el.log.Errorw("failed to dispatch calendar event", zap.Error(err)) - return errors.Wrapf(err, "failed to dispatch calendar event") + return fmt.Errorf("failed to dispatch calendar event, %w", err) } if el.eventPersistence != nil && el.eventPersistence.IsEnabled() { event := persist.Event{EventKey: el.getPersistenceKey(), EventPayload: string(payload)} @@ -278,17 +277,17 @@ func resolveSchedule(cal *v1alpha1.CalendarEventSource) (cronlib.Schedule, error specParser := cronlib.NewParser(cronlib.Minute | cronlib.Hour | cronlib.Dom | cronlib.Month | cronlib.Dow) schedule, err := specParser.Parse(cal.Schedule) if err != nil { - return nil, errors.Errorf("failed to parse schedule %s from calendar event. Cause: %+v", cal.Schedule, err.Error()) + return nil, fmt.Errorf("failed to parse schedule %s from calendar event. Cause: %w", cal.Schedule, err) } return schedule, nil } if cal.Interval != "" { intervalDuration, err := time.ParseDuration(cal.Interval) if err != nil { - return nil, errors.Errorf("failed to parse interval %s from calendar event. Cause: %+v", cal.Interval, err.Error()) + return nil, fmt.Errorf("failed to parse interval %s from calendar event.
Cause: %w", cal.Interval, err) } schedule := cronlib.ConstantDelaySchedule{Delay: intervalDuration} return schedule, nil } - return nil, errors.New("calendar event must contain either a schedule or interval") + return nil, fmt.Errorf("calendar event must contain either a schedule or interval") } diff --git a/eventsources/sources/calendar/validate_test.go b/eventsources/sources/calendar/validate_test.go index 02f220476f..a4f6c80d7b 100644 --- a/eventsources/sources/calendar/validate_test.go +++ b/eventsources/sources/calendar/validate_test.go @@ -19,7 +19,7 @@ package calendar import ( "context" "fmt" - "io/ioutil" + "os" "testing" "github.com/argoproj/argo-events/eventsources/sources" @@ -39,7 +39,7 @@ func TestValidateEventSource(t *testing.T) { assert.Error(t, err) assert.Equal(t, "must have either schedule or interval", err.Error()) - content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "calendar.yaml")) + content, err := os.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "calendar.yaml")) assert.Nil(t, err) var eventSource *v1alpha1.EventSource diff --git a/eventsources/sources/emitter/start.go b/eventsources/sources/emitter/start.go index ed7fe046fc..bc6a434693 100644 --- a/eventsources/sources/emitter/start.go +++ b/eventsources/sources/emitter/start.go @@ -19,14 +19,15 @@ package emitter import ( "context" "encoding/json" + "fmt" "time" emitter "github.com/emitter-io/go/v2" - "github.com/pkg/errors" "go.uber.org/zap" "github.com/argoproj/argo-events/common" "github.com/argoproj/argo-events/common/logging" + eventsourcecommon "github.com/argoproj/argo-events/eventsources/common" "github.com/argoproj/argo-events/eventsources/sources" metrics "github.com/argoproj/argo-events/metrics" apicommon "github.com/argoproj/argo-events/pkg/apis/common" @@ -58,7 +59,7 @@ func (el *EventListener) GetEventSourceType() apicommon.EventSourceType { } // StartListening starts listening events -func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte) error) error { +func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error) error { log := logging.FromContext(ctx). 
With(logging.LabelEventSourceType, el.GetEventSourceType(), logging.LabelEventName, el.GetEventName()) log.Info("started processing the Emitter event source...") @@ -70,7 +71,7 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt if emitterEventSource.TLS != nil { tlsConfig, err := common.GetTLSConfig(emitterEventSource.TLS) if err != nil { - return errors.Wrap(err, "failed to get the tls configuration") + return fmt.Errorf("failed to get the tls configuration, %w", err) } options = append(options, emitter.WithTLSConfig(tlsConfig)) } @@ -79,7 +80,7 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt if emitterEventSource.Username != nil { username, err := common.GetSecretFromVolume(emitterEventSource.Username) if err != nil { - return errors.Wrapf(err, "failed to retrieve the username from %s", emitterEventSource.Username.Name) + return fmt.Errorf("failed to retrieve the username from %s, %w", emitterEventSource.Username.Name, err) } options = append(options, emitter.WithUsername(username)) } @@ -87,7 +88,7 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt if emitterEventSource.Password != nil { password, err := common.GetSecretFromVolume(emitterEventSource.Password) if err != nil { - return errors.Wrapf(err, "failed to retrieve the password from %s", emitterEventSource.Password.Name) + return fmt.Errorf("failed to retrieve the password from %s, %w", emitterEventSource.Password.Name, err) } options = append(options, emitter.WithPassword(password)) } @@ -99,13 +100,13 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt log.Infow("creating a client", zap.Any("channelName", emitterEventSource.ChannelName)) client := emitter.NewClient(options...) 
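// common.DoWithRetry below replaces the former common.Connect helper: it re-runs the dial
// function according to the event source's ConnectionBackoff policy, so transient broker
// outages during startup are retried instead of immediately failing the listener.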
- if err := common.Connect(emitterEventSource.ConnectionBackoff, func() error { + if err := common.DoWithRetry(emitterEventSource.ConnectionBackoff, func() error { if err := client.Connect(); err != nil { return err } return nil }); err != nil { - return errors.Wrapf(err, "failed to connect to %s", emitterEventSource.Broker) + return fmt.Errorf("failed to connect to %s, %w", emitterEventSource.Broker, err) } if err := client.Subscribe(emitterEventSource.ChannelKey, emitterEventSource.ChannelName, func(_ *emitter.Client, message emitter.Message) { @@ -135,7 +136,7 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt el.Metrics.EventProcessingFailed(el.GetEventSourceName(), el.GetEventName()) } }); err != nil { - return errors.Wrapf(err, "failed to subscribe to channel %s", emitterEventSource.ChannelName) + return fmt.Errorf("failed to subscribe to channel %s, %w", emitterEventSource.ChannelName, err) } <-ctx.Done() diff --git a/eventsources/sources/emitter/validate.go b/eventsources/sources/emitter/validate.go index ec0aa5f7ff..bbf9152149 100644 --- a/eventsources/sources/emitter/validate.go +++ b/eventsources/sources/emitter/validate.go @@ -18,8 +18,7 @@ package emitter import ( "context" - - "github.com/pkg/errors" + "fmt" "github.com/argoproj/argo-events/common" apicommon "github.com/argoproj/argo-events/pkg/apis/common" @@ -36,13 +35,13 @@ func validate(eventSource *v1alpha1.EmitterEventSource) error { return common.ErrNilEventSource } if eventSource.Broker == "" { - return errors.New("broker url must be specified") + return fmt.Errorf("broker url must be specified") } if eventSource.ChannelName == "" { - return errors.New("channel name must be specified") + return fmt.Errorf("channel name must be specified") } if eventSource.ChannelKey == "" { - return errors.New("channel key secret selector must be specified") + return fmt.Errorf("channel key secret selector must be specified") } if eventSource.TLS != nil { return apicommon.ValidateTLSConfig(eventSource.TLS) diff --git a/eventsources/sources/emitter/validate_test.go b/eventsources/sources/emitter/validate_test.go index b6c657d6b1..a3cf1a2d52 100644 --- a/eventsources/sources/emitter/validate_test.go +++ b/eventsources/sources/emitter/validate_test.go @@ -19,7 +19,7 @@ package emitter import ( "context" "fmt" - "io/ioutil" + "os" "testing" "github.com/argoproj/argo-events/eventsources/sources" @@ -35,7 +35,7 @@ func TestValidateEventSource(t *testing.T) { assert.Error(t, err) assert.Equal(t, "broker url must be specified", err.Error()) - content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "emitter.yaml")) + content, err := os.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "emitter.yaml")) assert.Nil(t, err) var eventSource *v1alpha1.EventSource diff --git a/eventsources/sources/file/start.go b/eventsources/sources/file/start.go index b0ea5e5a23..caa4fa02fe 100644 --- a/eventsources/sources/file/start.go +++ b/eventsources/sources/file/start.go @@ -19,16 +19,17 @@ package file import ( "context" "encoding/json" + "fmt" "regexp" "strings" "time" "github.com/fsnotify/fsnotify" - "github.com/pkg/errors" watcherpkg "github.com/radovskyb/watcher" "go.uber.org/zap" "github.com/argoproj/argo-events/common/logging" + eventsourcecommon "github.com/argoproj/argo-events/eventsources/common" "github.com/argoproj/argo-events/eventsources/common/fsevent" "github.com/argoproj/argo-events/eventsources/sources" metrics "github.com/argoproj/argo-events/metrics" @@ -60,7 +61,7 @@ func (el 
*EventListener) GetEventSourceType() apicommon.EventSourceType { } // StartListening starts listening events -func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte) error) error { +func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error) error { log := logging.FromContext(ctx). With(logging.LabelEventSourceType, el.GetEventSourceType(), logging.LabelEventName, el.GetEventName()) defer sources.Recover(el.GetEventName()) @@ -81,14 +82,14 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt } // listenEvents listen to file related events. -func (el *EventListener) listenEvents(ctx context.Context, dispatch func([]byte) error, log *zap.SugaredLogger) error { +func (el *EventListener) listenEvents(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error, log *zap.SugaredLogger) error { fileEventSource := &el.FileEventSource // create new fs watcher log.Info("setting up a new file watcher...") watcher, err := fsnotify.NewWatcher() if err != nil { - return errors.Wrapf(err, "failed to set up a file watcher for %s", el.GetEventName()) + return fmt.Errorf("failed to set up a file watcher for %s, %w", el.GetEventName(), err) } defer watcher.Close() @@ -96,7 +97,7 @@ func (el *EventListener) listenEvents(ctx context.Context, dispatch func([]byte) log.Info("adding directory to monitor for the watcher...") err = watcher.Add(fileEventSource.WatchPathConfig.Directory) if err != nil { - return errors.Wrapf(err, "failed to add directory %s to the watcher for %s", fileEventSource.WatchPathConfig.Directory, el.GetEventName()) + return fmt.Errorf("failed to add directory %s to the watcher for %s, %w", fileEventSource.WatchPathConfig.Directory, el.GetEventName(), err) } var pathRegexp *regexp.Regexp @@ -104,7 +105,7 @@ func (el *EventListener) listenEvents(ctx context.Context, dispatch func([]byte) log.Infow("matching file path with configured regex...", zap.Any("regex", fileEventSource.WatchPathConfig.PathRegexp)) pathRegexp, err = regexp.Compile(fileEventSource.WatchPathConfig.PathRegexp) if err != nil { - return errors.Wrapf(err, "failed to match file path with configured regex %s for %s", fileEventSource.WatchPathConfig.PathRegexp, el.GetEventName()) + return fmt.Errorf("failed to match file path with configured regex %s for %s, %w", fileEventSource.WatchPathConfig.PathRegexp, el.GetEventName(), err) } } @@ -119,11 +120,11 @@ func (el *EventListener) listenEvents(ctx context.Context, dispatch func([]byte) fileEvent := fsevent.Event{Name: event.Name, Op: fsevent.NewOp(event.Op.String()), Metadata: el.FileEventSource.Metadata} payload, err := json.Marshal(fileEvent) if err != nil { - return errors.Wrap(err, "failed to marshal the event to the fs event") + return fmt.Errorf("failed to marshal the event to the fs event, %w", err) } log.Infow("dispatching file event on data channel...", zap.Any("event-type", event.Op.String()), zap.Any("descriptor-name", event.Name)) if err = dispatch(payload); err != nil { - return errors.Wrap(err, "failed to dispatch a file event") + return fmt.Errorf("failed to dispatch a file event, %w", err) } return nil } @@ -135,7 +136,7 @@ func (el *EventListener) listenEvents(ctx context.Context, dispatch func([]byte) if !ok { log.Info("fs watcher has stopped") // watcher stopped watching file events - return errors.Errorf("fs watcher stopped for %s", el.GetEventName()) + return fmt.Errorf("fs watcher stopped for %s", el.GetEventName()) } // 
fwc.Path == event.Name is required because we don't want to send event when .swp files are created matched := false @@ -152,7 +153,7 @@ func (el *EventListener) listenEvents(ctx context.Context, dispatch func([]byte) } } case err := <-watcher.Errors: - return errors.Wrapf(err, "failed to process %s", el.GetEventName()) + return fmt.Errorf("failed to process %s, %w", el.GetEventName(), err) case <-ctx.Done(): log.Info("event source has been stopped") return nil @@ -161,7 +162,7 @@ func (el *EventListener) listenEvents(ctx context.Context, dispatch func([]byte) } // listenEvents listen to file related events using polling. -func (el *EventListener) listenEventsPolling(ctx context.Context, dispatch func([]byte) error, log *zap.SugaredLogger) error { +func (el *EventListener) listenEventsPolling(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error, log *zap.SugaredLogger) error { fileEventSource := &el.FileEventSource // create new fs watcher @@ -173,7 +174,7 @@ func (el *EventListener) listenEventsPolling(ctx context.Context, dispatch func( log.Info("adding directory to monitor for the watcher...") err := watcher.Add(fileEventSource.WatchPathConfig.Directory) if err != nil { - return errors.Wrapf(err, "failed to add directory %s to the watcher for %s", fileEventSource.WatchPathConfig.Directory, el.GetEventName()) + return fmt.Errorf("failed to add directory %s to the watcher for %s, %w", fileEventSource.WatchPathConfig.Directory, el.GetEventName(), err) } var pathRegexp *regexp.Regexp @@ -181,7 +182,7 @@ func (el *EventListener) listenEventsPolling(ctx context.Context, dispatch func( log.Infow("matching file path with configured regex...", zap.Any("regex", fileEventSource.WatchPathConfig.PathRegexp)) pathRegexp, err = regexp.Compile(fileEventSource.WatchPathConfig.PathRegexp) if err != nil { - return errors.Wrapf(err, "failed to match file path with configured regex %s for %s", fileEventSource.WatchPathConfig.PathRegexp, el.GetEventName()) + return fmt.Errorf("failed to match file path with configured regex %s for %s, %w", fileEventSource.WatchPathConfig.PathRegexp, el.GetEventName(), err) } } @@ -196,11 +197,11 @@ func (el *EventListener) listenEventsPolling(ctx context.Context, dispatch func( fileEvent := fsevent.Event{Name: event.Name(), Op: fsevent.NewOp(event.Op.String()), Metadata: el.FileEventSource.Metadata} payload, err := json.Marshal(fileEvent) if err != nil { - return errors.Wrap(err, "failed to marshal the event to the fs event") + return fmt.Errorf("failed to marshal the event to the fs event, %w", err) } log.Infow("dispatching file event on data channel...", zap.Any("event-type", event.Op.String()), zap.Any("descriptor-name", event.Name)) if err = dispatch(payload); err != nil { - return errors.Wrap(err, "failed to dispatch file event") + return fmt.Errorf("failed to dispatch file event, %w", err) } return nil } @@ -241,7 +242,7 @@ func (el *EventListener) listenEventsPolling(ctx context.Context, dispatch func( }() log.Info("Starting watcher...") if err = watcher.Start(time.Millisecond * 100); err != nil { - return errors.Wrapf(err, "Failed to start watcher for %s", el.GetEventName()) + return fmt.Errorf("Failed to start watcher for %s, %w", el.GetEventName(), err) } return nil } diff --git a/eventsources/sources/file/validate_test.go b/eventsources/sources/file/validate_test.go index 9c4d86e303..eddbadb452 100644 --- a/eventsources/sources/file/validate_test.go +++ b/eventsources/sources/file/validate_test.go @@ -19,7 +19,7 @@ package file import ( 
"context" "fmt" - "io/ioutil" + "os" "testing" "github.com/ghodss/yaml" @@ -36,7 +36,7 @@ func TestValidateEventSource(t *testing.T) { assert.Error(t, err) assert.Equal(t, "type must be specified", err.Error()) - content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "file.yaml")) + content, err := os.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "file.yaml")) assert.Nil(t, err) var eventSource *v1alpha1.EventSource diff --git a/eventsources/sources/gcppubsub/start.go b/eventsources/sources/gcppubsub/start.go index 84b0f2f0a0..ba452f6f47 100644 --- a/eventsources/sources/gcppubsub/start.go +++ b/eventsources/sources/gcppubsub/start.go @@ -20,12 +20,12 @@ import ( "context" "encoding/json" "fmt" + "os" "time" "cloud.google.com/go/compute/metadata" "cloud.google.com/go/pubsub" - "github.com/pkg/errors" "go.uber.org/zap" "google.golang.org/api/option" "google.golang.org/grpc/codes" @@ -33,6 +33,7 @@ import ( "github.com/argoproj/argo-events/common" "github.com/argoproj/argo-events/common/logging" + eventsourcecommon "github.com/argoproj/argo-events/eventsources/common" "github.com/argoproj/argo-events/eventsources/sources" metrics "github.com/argoproj/argo-events/metrics" apicommon "github.com/argoproj/argo-events/pkg/apis/common" @@ -64,7 +65,7 @@ func (el *EventListener) GetEventSourceType() apicommon.EventSourceType { } // StartListening listens to GCP PubSub events -func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte) error) error { +func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error) error { // In order to listen events from GCP PubSub, // 1. Parse the event source that contains configuration to connect to GCP PubSub // 2. 
Create a new PubSub client
@@ -80,7 +81,7 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt
 	err := el.fillDefault(logger)
 	if err != nil {
-		return errors.Wrapf(err, "failed to fill default values for %s", el.GetEventName())
+		return fmt.Errorf("failed to fill default values for %s, %w", el.GetEventName(), err)
 	}
 	pubsubEventSource := &el.PubSubEventSource
@@ -98,7 +99,7 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt
 	log.Info("setting up a client to connect to PubSub...")
 	client, subscription, err := el.prepareSubscription(ctx, log)
 	if err != nil {
-		return errors.Wrapf(err, "failed to prepare client or subscription for %s", el.GetEventName())
+		return fmt.Errorf("failed to prepare client or subscription for %s, %w", el.GetEventName(), err)
 	}
 	log.Info("listening for messages from PubSub...")
@@ -136,7 +137,7 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt
 		m.Ack()
 	})
 	if err != nil {
-		return errors.Wrapf(err, "failed to receive the messages for subscription %s for %s", subscription, el.GetEventName())
+		return fmt.Errorf("failed to receive the messages for subscription %s for %s, %w", subscription, el.GetEventName(), err)
 	}
 	<-ctx.Done()
@@ -169,7 +170,7 @@ func (el *EventListener) fillDefault(logger *zap.SugaredLogger) error {
 		logger.Debug("determine project ID from GCP metadata server")
 		proj, err := metadata.ProjectID()
 		if err != nil {
-			return errors.Wrap(err, "project ID is not given and couldn't determine from GCP metadata server")
+			return fmt.Errorf("project ID is not given and couldn't determine from GCP metadata server, %w", err)
 		}
 		el.PubSubEventSource.ProjectID = proj
 	}
@@ -182,7 +183,7 @@ func (el *EventListener) fillDefault(logger *zap.SugaredLogger) error {
 		logger.Debug("auto generate subscription ID")
 		hashcode, err := el.hash()
 		if err != nil {
-			return errors.Wrap(err, "failed get hashcode")
+			return fmt.Errorf("failed to get hashcode, %w", err)
 		}
 		el.PubSubEventSource.SubscriptionID = fmt.Sprintf("%s-%s", el.GetEventName(), hashcode)
 	}
@@ -206,18 +207,15 @@ func (el *EventListener) prepareSubscription(ctx context.Context, logger *zap.Su
 		logger.Debug("using credentials from secret")
 		jsonCred, err := common.GetSecretFromVolume(secret)
 		if err != nil {
-			return nil, nil, errors.Wrap(err, "could not find credentials")
+			return nil, nil, fmt.Errorf("could not find credentials, %w", err)
 		}
 		opts = append(opts, option.WithCredentialsJSON([]byte(jsonCred)))
-	} else if credFile := el.PubSubEventSource.DeprecatedCredentialsFile; credFile != "" {
-		logger.Debug("using credentials from file (DEPRECATED)")
-		opts = append(opts, option.WithCredentialsFile(credFile))
 	} else {
 		logger.Debug("using default credentials")
 	}
 	client, err := pubsub.NewClient(ctx, pubsubEventSource.ProjectID, opts...)
 	if err != nil {
-		return nil, nil, errors.Wrapf(err, "failed to set up client for %s", el.GetEventName())
+		return nil, nil, fmt.Errorf("failed to set up client for %s, %w", el.GetEventName(), err)
 	}
 	logger.Debug("set up pubsub client")
@@ -233,23 +231,33 @@ func (el *EventListener) prepareSubscription(ctx context.Context, logger *zap.Su
 	// no             | yes         | yes          | create subsc.         | pubsub.subscriptions.create (proj.) + pubsub.topics.attachSubscription (topic)
 	// no             | yes         | no           | create topic & subsc. | above + pubsub.topics.create (proj. for topic)
-	// trick: you don't need to have get permission to check only whether it exists
-	perms, err := subscription.IAM().TestPermissions(ctx, []string{"pubsub.subscriptions.consume"})
-	subscExists := len(perms) == 1
-	if !subscExists {
-		switch status.Code(err) {
-		case codes.OK:
-			client.Close()
-			return nil, nil, errors.Errorf("you lack permission to pull from %s", subscription)
-		case codes.NotFound:
-			// OK, maybe the subscription doesn't exist yet, so create it later
-			// (it possibly means project itself doesn't exist, but it's ok because we'll see an error later in such case)
-		default:
+	subscExists := false
+	if addr := os.Getenv("PUBSUB_EMULATOR_HOST"); addr != "" {
+		logger.Debug("using pubsub emulator - skipping permissions check")
+		subscExists, err = subscription.Exists(ctx)
+		if err != nil {
 			client.Close()
-			return nil, nil, errors.Wrapf(err, "failed to test permission for subscription %s", subscription)
+			return nil, nil, fmt.Errorf("failed to check if subscription %s exists, %w", subscription, err)
+		}
+	} else {
+		// trick: you don't need to have get permission to check only whether it exists
+		perms, err := subscription.IAM().TestPermissions(ctx, []string{"pubsub.subscriptions.consume"})
+		subscExists = len(perms) == 1
+		if !subscExists {
+			switch status.Code(err) {
+			case codes.OK:
+				client.Close()
+				return nil, nil, fmt.Errorf("you lack permission to pull from %s", subscription)
+			case codes.NotFound:
+				// OK, maybe the subscription doesn't exist yet, so create it later
+				// (it possibly means project itself doesn't exist, but it's ok because we'll see an error later in such case)
+			default:
+				client.Close()
+				return nil, nil, fmt.Errorf("failed to test permission for subscription %s, %w", subscription, err)
+			}
+		}
+		logger.Debug("checked if subscription exists and you have right permission")
 	}
-	logger.Debug("checked if subscription exists and you have right permission")
 	// subsc.
exists | topic given | topic exists | action | required permissions // :------------ | :---------- | :----------- | :-------------------- | :----------------------------------------------------------------------------- @@ -258,7 +266,7 @@ func (el *EventListener) prepareSubscription(ctx context.Context, logger *zap.Su if pubsubEventSource.Topic == "" { if !subscExists { client.Close() - return nil, nil, errors.Errorf("you need to specify topicID to create missing subscription %s", subscription) + return nil, nil, fmt.Errorf("you need to specify topicID to create missing subscription %s", subscription) } logger.Debug("subscription exists and no topic given, fine") return client, subscription, nil @@ -273,18 +281,18 @@ func (el *EventListener) prepareSubscription(ctx context.Context, logger *zap.Su subscConfig, err := subscription.Config(ctx) if err != nil { client.Close() - return nil, nil, errors.Wrapf(err, "failed to get subscription's config for verifying topic") + return nil, nil, fmt.Errorf("failed to get subscription's config for verifying topic, %w", err) } switch actualTopic := subscConfig.Topic.String(); actualTopic { case "_deleted-topic_": client.Close() - return nil, nil, errors.New("the topic for the subscription has been deleted") + return nil, nil, fmt.Errorf("the topic for the subscription has been deleted") case topic.String(): logger.Debug("subscription exists and its topic matches given one, fine") return client, subscription, nil default: client.Close() - return nil, nil, errors.Errorf("this subscription belongs to wrong topic %s", actualTopic) + return nil, nil, fmt.Errorf("this subscription belongs to wrong topic %s", actualTopic) } } @@ -303,7 +311,7 @@ func (el *EventListener) prepareSubscription(ctx context.Context, logger *zap.Su // (it possibly means project itself doesn't exist, but it's ok because we'll see an error later in such case) default: client.Close() - return nil, nil, errors.Wrapf(err, "failed to create %s for %s", subscription, topic) + return nil, nil, fmt.Errorf("failed to create %s for %s, %w", subscription, topic, err) } // subsc. exists | topic given | topic exists | action | required permissions @@ -314,20 +322,20 @@ func (el *EventListener) prepareSubscription(ctx context.Context, logger *zap.Su topicClient, err := pubsub.NewClient(ctx, pubsubEventSource.TopicProjectID, opts...) 
if err != nil { client.Close() - return nil, nil, errors.Wrapf(err, "failed to create client to create %s", topic) + return nil, nil, fmt.Errorf("failed to create client to create %s, %w", topic, err) } defer topicClient.Close() _, err = topicClient.CreateTopic(ctx, topic.ID()) if err != nil { client.Close() - return nil, nil, errors.Wrapf(err, "failed to create %s", topic) + return nil, nil, fmt.Errorf("failed to create %s, %w", topic, err) } logger.Debug("topic created") _, err = client.CreateSubscription(ctx, subscription.ID(), pubsub.SubscriptionConfig{Topic: topic}) if err != nil { client.Close() - return nil, nil, errors.Wrapf(err, "failed to create %s for %s", subscription, topic) + return nil, nil, fmt.Errorf("failed to create %s for %s, %w", subscription, topic, err) } logger.Debug("subscription created") return client, subscription, nil diff --git a/eventsources/sources/gcppubsub/validate_test.go b/eventsources/sources/gcppubsub/validate_test.go index 66e7b2f89f..365790d6ce 100644 --- a/eventsources/sources/gcppubsub/validate_test.go +++ b/eventsources/sources/gcppubsub/validate_test.go @@ -19,7 +19,7 @@ package gcppubsub import ( "context" "fmt" - "io/ioutil" + "os" "testing" "github.com/argoproj/argo-events/eventsources/sources" @@ -35,7 +35,7 @@ func TestValidateEventSource(t *testing.T) { assert.Error(t, err) assert.Equal(t, "must specify topic or subscriptionID", err.Error()) - content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "gcp-pubsub.yaml")) + content, err := os.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "gcp-pubsub.yaml")) assert.Nil(t, err) var eventSource *v1alpha1.EventSource diff --git a/eventsources/sources/generic/generic.pb.go b/eventsources/sources/generic/generic.pb.go index 5c1c7fba75..6b99ed4064 100644 --- a/eventsources/sources/generic/generic.pb.go +++ b/eventsources/sources/generic/generic.pb.go @@ -73,7 +73,7 @@ func (m *EventSource) GetConfig() []byte { return nil } -//* +// * // Represents an event type Event struct { // The event source name. diff --git a/eventsources/sources/generic/start.go b/eventsources/sources/generic/start.go index 24ec184e75..beee0b2f6e 100644 --- a/eventsources/sources/generic/start.go +++ b/eventsources/sources/generic/start.go @@ -6,14 +6,15 @@ import ( fmt "fmt" "time" - "github.com/pkg/errors" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/metadata" "github.com/argoproj/argo-events/common" "github.com/argoproj/argo-events/common/logging" + eventsourcecommon "github.com/argoproj/argo-events/eventsources/common" "github.com/argoproj/argo-events/eventsources/sources" metrics "github.com/argoproj/argo-events/metrics" apicommon "github.com/argoproj/argo-events/pkg/apis/common" @@ -47,7 +48,7 @@ func (el *EventListener) GetEventSourceType() apicommon.EventSourceType { } // StartListening listens to generic events -func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte) error) error { +func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error) error { logger := logging.FromContext(ctx). 
With(zap.String(logging.LabelEventSourceType, string(el.GetEventSourceType())),
			zap.String(logging.LabelEventName, el.GetEventName()),
@@ -93,7 +94,7 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt
 		}
 	}
 }
 
-func (el *EventListener) handleOne(event *Event, dispatch func([]byte) error, logger *zap.SugaredLogger) error {
+func (el *EventListener) handleOne(event *Event, dispatch func([]byte, ...eventsourcecommon.Option) error, logger *zap.SugaredLogger) error {
 	defer func(start time.Time) {
 		el.Metrics.EventProcessingDuration(el.GetEventSourceName(), el.GetEventName(), float64(time.Since(start)/time.Millisecond))
 	}(time.Now())
@@ -109,11 +110,11 @@ func (el *EventListener) handleOne(event *Event, dispatch func([]byte) error, lo
 	}
 	eventBytes, err := json.Marshal(eventData)
 	if err != nil {
-		return errors.Wrap(err, "failed to marshal the event data")
+		return fmt.Errorf("failed to marshal the event data, %w", err)
 	}
 	logger.Info("dispatching event...")
 	if err := dispatch(eventBytes); err != nil {
-		return errors.Wrap(err, "failed to dispatch a Generic event")
+		return fmt.Errorf("failed to dispatch a Generic event, %w", err)
 	}
 	return nil
 }
@@ -122,7 +123,7 @@ func (el *EventListener) connect() (Eventing_StartEventSourceClient, error) {
 	var opt []grpc.DialOption
 	opt = append(opt, grpc.WithBlock())
 	if el.GenericEventSource.Insecure {
-		opt = append(opt, grpc.WithInsecure())
+		opt = append(opt, grpc.WithTransportCredentials(insecure.NewCredentials()))
 	}
 	conn, err := grpc.DialContext(context.Background(), el.GenericEventSource.URL, opt...)
 	if err != nil {
diff --git a/eventsources/sources/generic/validate_test.go b/eventsources/sources/generic/validate_test.go
index e67f248018..d227fc92ac 100644
--- a/eventsources/sources/generic/validate_test.go
+++ b/eventsources/sources/generic/validate_test.go
@@ -3,7 +3,7 @@ package generic
 import (
 	"context"
 	"fmt"
-	"io/ioutil"
+	"os"
 	"testing"
 
 	"github.com/argoproj/argo-events/eventsources/sources"
@@ -19,7 +19,7 @@ func TestEventListener_ValidateEventSource(t *testing.T) {
 	assert.Error(t, err)
 	assert.Equal(t, "server url can't be empty", err.Error())
 
-	content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "generic.yaml"))
+	content, err := os.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "generic.yaml"))
 	assert.Nil(t, err)
 
 	var eventSource *v1alpha1.EventSource
diff --git a/eventsources/sources/gerrit/hook_util.go b/eventsources/sources/gerrit/hook_util.go
new file mode 100644
index 0000000000..4cc661b03a
--- /dev/null
+++ b/eventsources/sources/gerrit/hook_util.go
@@ -0,0 +1,73 @@
+package gerrit
+
+import (
+	"fmt"
+	"net/url"
+
+	"github.com/andygrunwald/go-gerrit"
+)
+
+func newGerritWebhookService(client *gerrit.Client) *gerritWebhookService {
+	return &gerritWebhookService{client: client}
+}
+
+// gerritWebhookService contains functions for querying the API provided by the core webhooks plugin.
+// endpoints Refs: https://github.com/GerritCodeReview/plugins_webhooks/blob/master/src/main/resources/Documentation/rest-api-config.md +type gerritWebhookService struct { + client *gerrit.Client +} + +func (g *gerritWebhookService) List(project string) (map[string]*ProjectHookConfigs, error) { + endpoints := fmt.Sprintf("/config/server/webhooks~projects/%s/remotes/", url.QueryEscape(project)) + req, err := g.client.NewRequest("GET", endpoints, nil) + if err != nil { + return nil, err + } + hooks := make(map[string]*ProjectHookConfigs) + _, err = g.client.Do(req, &hooks) + if err != nil { + return nil, err + } + return hooks, nil +} + +func (g *gerritWebhookService) Get(project, remoteName string) (*ProjectHookConfigs, error) { + endpoints := fmt.Sprintf("/config/server/webhooks~projects/%s/remotes/%s/", url.QueryEscape(project), url.QueryEscape(remoteName)) + req, err := g.client.NewRequest("GET", endpoints, nil) + if err != nil { + return nil, err + } + hook := new(ProjectHookConfigs) + _, err = g.client.Do(req, hook) + if err != nil { + return nil, err + } + return hook, nil +} + +func (g *gerritWebhookService) Create(project, remoteName string, hook *ProjectHookConfigs) (*ProjectHookConfigs, error) { + endpoints := fmt.Sprintf("/config/server/webhooks~projects/%s/remotes/%s/", url.QueryEscape(project), url.QueryEscape(remoteName)) + req, err := g.client.NewRequest("PUT", endpoints, hook) + if err != nil { + return nil, err + } + res := new(ProjectHookConfigs) + _, err = g.client.Do(req, res) + if err != nil { + return nil, err + } + return res, nil +} + +func (g *gerritWebhookService) Delete(project, remoteName string) error { + endpoints := fmt.Sprintf("/config/server/webhooks~projects/%s/remotes/%s/", url.QueryEscape(project), url.QueryEscape(remoteName)) + req, err := g.client.NewRequest("DELETE", endpoints, nil) + if err != nil { + return err + } + _, err = g.client.Do(req, nil) + if err != nil { + return err + } + return nil +} diff --git a/eventsources/sources/gerrit/start.go b/eventsources/sources/gerrit/start.go new file mode 100644 index 0000000000..af1101b441 --- /dev/null +++ b/eventsources/sources/gerrit/start.go @@ -0,0 +1,245 @@ +/* +Copyright 2018 BlackRock, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gerrit + +import ( + "context" + "crypto/rand" + "encoding/json" + "fmt" + "io" + "math/big" + "net/http" + "time" + + "github.com/andygrunwald/go-gerrit" + "go.uber.org/zap" + + "github.com/argoproj/argo-events/common" + "github.com/argoproj/argo-events/common/logging" + eventsourcecommon "github.com/argoproj/argo-events/eventsources/common" + "github.com/argoproj/argo-events/eventsources/common/webhook" + "github.com/argoproj/argo-events/eventsources/sources" + "github.com/argoproj/argo-events/pkg/apis/events" +) + +// controller controls the webhook operations +var ( + controller = webhook.NewController() +) + +// set up the activation and inactivation channels to control the state of routes. 
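// ---- editor's aside (illustrative, not part of this change) ----
// A minimal sketch of how the gerritWebhookService from hook_util.go above is
// meant to be driven: list a project's configured remotes and PUT the hook
// config only when it is missing or points at the wrong URL. ensureHook is a
// hypothetical helper name; start.go below implements the same idea inline.
func ensureHook(svc *gerritWebhookService, project, remote string, want *ProjectHookConfigs) error {
	hooks, err := svc.List(project)
	if err != nil {
		return err
	}
	// Nothing to do if the remote already exists with the desired URL.
	if h, ok := hooks[remote]; ok && h.URL == want.URL {
		return nil
	}
	_, err = svc.Create(project, remote, want)
	return err
}
// ---- end editor's aside ----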
+func init() { + go webhook.ProcessRouteStatus(controller) +} + +// Implement Router +// 1. GetRoute +// 2. HandleRoute +// 3. PostActivate +// 4. PostDeactivate + +// GetRoute returns the route +func (router *Router) GetRoute() *webhook.Route { + return router.route +} + +// HandleRoute handles incoming requests on the route +func (router *Router) HandleRoute(writer http.ResponseWriter, request *http.Request) { + route := router.GetRoute() + logger := route.Logger.With( + logging.LabelEndpoint, route.Context.Endpoint, + logging.LabelPort, route.Context.Port, + logging.LabelHTTPMethod, route.Context.Method, + ) + + logger.Info("received a request, processing it...") + + if !route.Active { + logger.Info("endpoint is not active, won't process the request") + common.SendErrorResponse(writer, "inactive endpoint") + return + } + + defer func(start time.Time) { + route.Metrics.EventProcessingDuration(route.EventSourceName, route.EventName, float64(time.Since(start)/time.Millisecond)) + }(time.Now()) + + request.Body = http.MaxBytesReader(writer, request.Body, route.Context.GetMaxPayloadSize()) + body, err := io.ReadAll(request.Body) + if err != nil { + logger.Errorw("failed to parse request body", zap.Error(err)) + common.SendErrorResponse(writer, err.Error()) + route.Metrics.EventProcessingFailed(route.EventSourceName, route.EventName) + return + } + + event := &events.GerritEventData{ + Headers: request.Header, + Body: (*json.RawMessage)(&body), + Metadata: router.gerritEventSource.Metadata, + } + + eventBody, err := json.Marshal(event) + if err != nil { + logger.Info("failed to marshal event") + common.SendErrorResponse(writer, "invalid event") + route.Metrics.EventProcessingFailed(route.EventSourceName, route.EventName) + return + } + + logger.Info("dispatching event on route's data channel") + route.DataCh <- eventBody + + logger.Info("request successfully processed") + common.SendSuccessResponse(writer, "success") +} + +// PostActivate performs operations once the route is activated and ready to consume requests +func (router *Router) PostActivate() error { + return nil +} + +// PostInactivate performs operations after the route is inactivated +func (router *Router) PostInactivate() error { + gerritEventSource := router.gerritEventSource + if !gerritEventSource.NeedToCreateHooks() || !gerritEventSource.DeleteHookOnFinish { + return nil + } + + logger := router.route.Logger + logger.Info("deleting Gerrit hooks...") + + for _, p := range gerritEventSource.Projects { + _, ok := router.projectHooks[p] + if !ok { + return fmt.Errorf("can not find hook ID for project %s", p) + } + if err := router.gerritHookService.Delete(p, gerritEventSource.HookName); err != nil { + return fmt.Errorf("failed to delete hook for project %s. err: %w", p, err) + } + logger.Infof("Gerrit hook deleted for project %s", p) + } + return nil +} + +// StartListening starts an event source +func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error) error { + logger := logging.FromContext(ctx). 
+		With(logging.LabelEventSourceType, el.GetEventSourceType(), logging.LabelEventName, el.GetEventName())
+	logger.Info("started processing the Gerrit event source...")
+
+	defer sources.Recover(el.GetEventName())
+
+	gerritEventSource := &el.GerritEventSource
+
+	route := webhook.NewRoute(gerritEventSource.Webhook, logger, el.GetEventSourceName(), el.GetEventName(), el.Metrics)
+	router := &Router{
+		route:             route,
+		gerritEventSource: gerritEventSource,
+		projectHooks:      make(map[string]string),
+	}
+
+	if gerritEventSource.NeedToCreateHooks() {
+		// In order to set up a hook for the Gerrit project,
+		// 1. Set up Gerrit client with basic auth
+		// 2. Configure Hook with given event type
+		// 3. Create project hook
+
+		logger.Info("retrieving the access token credentials...")
+
+		formattedURL := common.FormattedURL(gerritEventSource.Webhook.URL, gerritEventSource.Webhook.Endpoint)
+		opt := &ProjectHookConfigs{
+			URL:       formattedURL,
+			Events:    router.gerritEventSource.Events,
+			SslVerify: router.gerritEventSource.SslVerify,
+		}
+
+		logger.Info("setting up the client to connect to Gerrit...")
+		var err error
+		router.gerritClient, err = gerrit.NewClient(router.gerritEventSource.GerritBaseURL, nil)
+		if err != nil {
+			return fmt.Errorf("failed to initialize client, %w", err)
+		}
+		if gerritEventSource.Auth != nil {
+			username, err := common.GetSecretFromVolume(gerritEventSource.Auth.Username)
+			if err != nil {
+				return fmt.Errorf("username not found, %w", err)
+			}
+			password, err := common.GetSecretFromVolume(gerritEventSource.Auth.Password)
+			if err != nil {
+				return fmt.Errorf("password not found, %w", err)
+			}
+			router.gerritClient.Authentication.SetBasicAuth(username, password)
+		}
+		router.gerritHookService = newGerritWebhookService(router.gerritClient)
+
+		f := func() {
+			for _, p := range gerritEventSource.Projects {
+				hooks, err := router.gerritHookService.List(p)
+				if err != nil {
+					logger.Errorf("failed to list existing webhooks of project %s. err: %+v", p, err)
+					continue
+				}
+				// hook already exists
+				if h, ok := hooks[gerritEventSource.HookName]; ok {
+					if h.URL == formattedURL {
+						router.projectHooks[p] = gerritEventSource.HookName
+						continue
+					}
+				}
+				logger.Infof("hook not found for project %s, creating ...", p)
+				if _, err := router.gerritHookService.Create(p, gerritEventSource.HookName, opt); err != nil {
+					logger.Errorf("failed to create gerrit webhook for project %s. err: %+v", p, err)
+					continue
+				}
+				router.projectHooks[p] = gerritEventSource.HookName
+				time.Sleep(500 * time.Millisecond)
+			}
+		}
+
+		// Mitigate race conditions - it might create multiple hooks with same config when replicas > 1
+		randomNum, _ := rand.Int(rand.Reader, big.NewInt(int64(2000)))
+		time.Sleep(time.Duration(randomNum.Int64()) * time.Millisecond)
+		f()
+
+		ctx, cancel := context.WithCancel(ctx)
+		defer cancel()
+
+		go func() {
+			// Another kind of race conditions might happen when pods do rolling upgrade - new pod starts
+			// and old pod terminates, if DeleteHookOnFinish is true, the hook will be deleted from gerrit.
+			// This is a workaround to mitigate the race conditions.
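// ---- editor's aside (illustrative, not part of this change) ----
// The two race mitigations used above, distilled into one hedged sketch: a
// crypto/rand startup jitter spreads replicas out so they do not all create
// hooks at once, and a periodic reconcile re-creates a hook that a
// terminating replica may have deleted. jitterThenReconcile is a hypothetical
// name; the imports (context, crypto/rand, math/big, time) match this file.
func jitterThenReconcile(ctx context.Context, maxJitter, interval time.Duration, reconcile func()) {
	// crypto/rand mirrors the rand.Int call above and needs no seeding.
	n, _ := rand.Int(rand.Reader, big.NewInt(int64(maxJitter)))
	time.Sleep(time.Duration(n.Int64()))
	reconcile()

	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			reconcile()
		}
	}
}
// ---- end editor's aside ----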
+ logger.Info("starting gerrit hooks manager daemon") + ticker := time.NewTicker(60 * time.Second) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + logger.Info("exiting gerrit hooks manager daemon") + return + case <-ticker.C: + f() + } + } + }() + } else { + logger.Info("no need to create webhooks") + } + + return webhook.ManageRoute(ctx, router, controller, dispatch) +} diff --git a/eventsources/sources/gerrit/types.go b/eventsources/sources/gerrit/types.go new file mode 100644 index 0000000000..6d90f56892 --- /dev/null +++ b/eventsources/sources/gerrit/types.go @@ -0,0 +1,93 @@ +/* +Copyright 2018 BlackRock, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gerrit + +import ( + "github.com/andygrunwald/go-gerrit" + + "github.com/argoproj/argo-events/eventsources/common/webhook" + "github.com/argoproj/argo-events/metrics" + apiCommon "github.com/argoproj/argo-events/pkg/apis/common" + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" +) + +// EventListener implements ConfigExecutor +type EventListener struct { + EventSourceName string + EventName string + GerritEventSource v1alpha1.GerritEventSource + Metrics *metrics.Metrics +} + +// GetEventSourceName returns name of event source +func (el *EventListener) GetEventSourceName() string { + return el.EventSourceName +} + +// GetEventName returns name of event +func (el *EventListener) GetEventName() string { + return el.EventName +} + +// GetEventSourceType return type of event server +func (el *EventListener) GetEventSourceType() apiCommon.EventSourceType { + return apiCommon.GerritEvent +} + +// Router contains the configuration information for a route +type Router struct { + // route contains information about a API endpoint + route *webhook.Route + // gerritClient is the client to connect to Gerrit + gerritClient *gerrit.Client + // gerritClient is the client to connect to Gerrit + gerritHookService *gerritWebhookService + // project -> hook + projectHooks map[string]string + // gerritEventSource is the event source that contains configuration necessary to consume events from Gerrit + gerritEventSource *v1alpha1.GerritEventSource +} + +// ProjectHookConfigs is the config for gerrit project +// Ref: https://gerrit.googlesource.com/plugins/webhooks/+doc/master/src/main/resources/Documentation/config.md +type ProjectHookConfigs struct { + // URL: Address of the remote server to post events to + URL string `json:"url,omitempty"` + // Events: + // Type of the event which will be posted to the remote url. Multiple event types can be specified, listing event types which should be posted. + // When no event type is configured, all events will be posted. + Events []string `json:"events,omitempty"` + // ConnectionTimeout: + // Maximum interval of time in milliseconds the plugin waits for a connection to the target instance. + // When not specified, the default value is derrived from global configuration. 
+	ConnectionTimeout string `json:"connectionTimeout,omitempty"`
+	// SocketTimeout:
+	// Maximum interval of time in milliseconds the plugin waits for a response from the target instance once the connection has been established.
+	// When not specified, the default value is derived from global configuration.
+	SocketTimeout string `json:"socketTimeout,omitempty"`
+	// MaxTries:
+	// Maximum number of times the plugin should attempt to post an event to the target url. Setting this value to 0 will disable retries.
+	// When not specified, the default value is derived from global configuration.
+	MaxTries string `json:"maxTries,omitempty"`
+	// RetryInterval:
+	// The interval of time in milliseconds between the subsequent auto-retries.
+	// When not specified, the default value is derived from global configuration.
+	RetryInterval string `json:"retryInterval,omitempty"`
+	// SslVerify:
+	// When 'true' SSL certificate verification of remote url is performed when payload is delivered, the default value is derived from global configuration.
+	SslVerify bool `json:"sslVerify,omitempty"`
+}
diff --git a/eventsources/sources/gerrit/validate.go b/eventsources/sources/gerrit/validate.go
new file mode 100644
index 0000000000..3dbc5b0b9c
--- /dev/null
+++ b/eventsources/sources/gerrit/validate.go
@@ -0,0 +1,47 @@
+/*
+Copyright 2018 BlackRock, Inc.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package gerrit
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/argoproj/argo-events/common"
+	"github.com/argoproj/argo-events/eventsources/common/webhook"
+	"github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1"
+)
+
+// ValidateEventSource validates gerrit event source
+func (listener *EventListener) ValidateEventSource(ctx context.Context) error {
+	return validate(&listener.GerritEventSource)
+}
+
+func validate(eventSource *v1alpha1.GerritEventSource) error {
+	if eventSource == nil {
+		return common.ErrNilEventSource
+	}
+	if len(eventSource.Projects) == 0 {
+		return fmt.Errorf("projects cannot be empty")
+	}
+	if eventSource.Events == nil {
+		return fmt.Errorf("events can't be empty")
+	}
+	if eventSource.GerritBaseURL == "" {
+		return fmt.Errorf("gerrit base url can't be empty")
+	}
+	if eventSource.Auth == nil {
+		return fmt.Errorf("username and password can't be empty")
+	}
+	return webhook.ValidateWebhookContext(eventSource.Webhook)
+}
diff --git a/eventsources/sources/gerrit/validate_test.go b/eventsources/sources/gerrit/validate_test.go
new file mode 100644
index 0000000000..b5bbc6733d
--- /dev/null
+++ b/eventsources/sources/gerrit/validate_test.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gerrit + +import ( + "context" + "fmt" + "os" + "testing" + + "github.com/ghodss/yaml" + "github.com/stretchr/testify/assert" + + "github.com/argoproj/argo-events/eventsources/sources" + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" +) + +func TestValidateEventSource(t *testing.T) { + listener := &EventListener{} + + err := listener.ValidateEventSource(context.Background()) + assert.Error(t, err) + assert.Equal(t, "projects cannot be empty", err.Error()) + + content, err := os.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "gerrit.yaml")) + assert.Nil(t, err) + + var eventSource *v1alpha1.EventSource + err = yaml.Unmarshal(content, &eventSource) + assert.Nil(t, err) + assert.NotNil(t, eventSource.Spec.Gerrit) + + for name, value := range eventSource.Spec.Gerrit { + fmt.Println(name) + l := &EventListener{ + GerritEventSource: value, + } + err := l.ValidateEventSource(context.Background()) + assert.NoError(t, err) + } +} diff --git a/eventsources/sources/github/appauth.go b/eventsources/sources/github/appauth.go new file mode 100644 index 0000000000..66c7e5b1e0 --- /dev/null +++ b/eventsources/sources/github/appauth.go @@ -0,0 +1,45 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package github + +import ( + "net/http" + + "github.com/bradleyfalzon/ghinstallation/v2" +) + +type AppsAuthStrategy struct { + AppID int64 + BaseURL string + InstallationID int64 + PrivateKey string + Transport http.RoundTripper +} + +// AuthTransport implements the AuthStrategy interface. +func (t *AppsAuthStrategy) AuthTransport() (http.RoundTripper, error) { + appTransport, err := ghinstallation.New(t.transport(), t.AppID, t.InstallationID, []byte(t.PrivateKey)) + if appTransport != nil && t.BaseURL != "" { + appTransport.BaseURL = t.BaseURL + } + return appTransport, err +} + +func (t *AppsAuthStrategy) transport() http.RoundTripper { + if t.Transport != nil { + return t.Transport + } + + return http.DefaultTransport +} diff --git a/eventsources/sources/github/hook_util.go b/eventsources/sources/github/hook_util.go index 4191604e72..3e408a20e0 100644 --- a/eventsources/sources/github/hook_util.go +++ b/eventsources/sources/github/hook_util.go @@ -1,50 +1,13 @@ package github import ( - gh "github.com/google/go-github/v31/github" -) - -// sliceEqual returns true if the two provided string slices are equal. 
-func sliceEqual(first []string, second []string) bool { - if len(first) == 0 && len(second) == 0 { - return true - } - if len(first) == 0 || len(second) == 0 { - return false - } - - tmp := make(map[string]int) - for _, i := range first { - tmp[i] = 1 - } - - for _, i := range second { - v, ok := tmp[i] - if !ok || v == -1 { - tmp[i] = -1 - } else { - tmp[i] = 2 - } - } - - if v, ok := tmp["*"]; ok { - // If both slices contain "*", return true directly - return v == 2 - } + gh "github.com/google/go-github/v50/github" - for _, v := range tmp { - // -1: only exists in second - // 1: only exists in first - // 2: exists in both - if v < 2 { - return false - } - } - return true -} + "github.com/argoproj/argo-events/common" +) // compareHook returns true if the hook matches the url and event. -func compareHook(hook *gh.Hook, url string, event []string) bool { +func compareHook(hook *gh.Hook, url string, events []string) bool { if hook == nil { return false } @@ -53,7 +16,10 @@ func compareHook(hook *gh.Hook, url string, event []string) bool { return false } - return sliceEqual(hook.Events, event) + // Webhook events are equal if both old events slice and new events slice + // contain the same events, or if both have "*" event. + return common.ElementsMatch(hook.Events, events) || + (common.SliceContains(hook.Events, "*") && common.SliceContains(events, "*")) } // getHook returns the hook that matches the url and event, or nil if not found. diff --git a/eventsources/sources/github/hook_util_test.go b/eventsources/sources/github/hook_util_test.go index 70ab05cc2c..ec95cdefe4 100644 --- a/eventsources/sources/github/hook_util_test.go +++ b/eventsources/sources/github/hook_util_test.go @@ -3,36 +3,10 @@ package github import ( "testing" - gh "github.com/google/go-github/v31/github" + gh "github.com/google/go-github/v50/github" "github.com/stretchr/testify/assert" ) -func TestSliceEqual(t *testing.T) { - assert.True(t, sliceEqual(nil, nil)) - assert.True(t, sliceEqual([]string{"hello"}, []string{"hello"})) - assert.True(t, sliceEqual([]string{"hello", "world"}, []string{"hello", "world"})) - assert.True(t, sliceEqual([]string{}, []string{})) - - assert.False(t, sliceEqual([]string{"hello"}, nil)) - assert.False(t, sliceEqual([]string{"hello"}, []string{})) - assert.False(t, sliceEqual([]string{}, []string{"hello"})) - assert.False(t, sliceEqual([]string{"hello"}, []string{"hello", "world"})) - assert.False(t, sliceEqual([]string{"hello", "world"}, []string{"hello"})) - assert.False(t, sliceEqual([]string{"hello", "world"}, []string{"hello", "moon"})) - assert.True(t, sliceEqual([]string{"hello", "world"}, []string{"world", "hello"})) - assert.True(t, sliceEqual([]string{"hello", "*"}, []string{"*"})) - assert.True(t, sliceEqual([]string{"hello", "*"}, []string{"*", "world"})) - assert.True(t, sliceEqual([]string{"hello", "world", "hello"}, []string{"hello", "hello", "world", "world"})) - assert.True(t, sliceEqual([]string{"world", "hello"}, []string{"hello", "hello", "world", "world"})) - assert.True(t, sliceEqual([]string{"hello", "hello", "world", "world"}, []string{"world", "hello"})) - assert.False(t, sliceEqual([]string{"hello"}, []string{"*", "hello"})) - assert.False(t, sliceEqual([]string{"hello", "*"}, []string{"hello"})) - assert.False(t, sliceEqual([]string{"*", "hello", "*"}, []string{"hello"})) - assert.False(t, sliceEqual([]string{"hello"}, []string{"world", "world"})) - assert.False(t, sliceEqual([]string{"hello", "hello"}, []string{"world", "world"})) - assert.True(t, 
	sliceEqual([]string{"*", "hello", "*"}, []string{"*", "world", "hello", "world"}))
-}
-
 func TestCompareHook(t *testing.T) {
 	assert.False(t, compareHook(nil, "https://google.com/", []string{}))
diff --git a/eventsources/sources/github/start.go b/eventsources/sources/github/start.go
index b7f42a2c8b..de900b1ad6 100644
--- a/eventsources/sources/github/start.go
+++ b/eventsources/sources/github/start.go
@@ -1,5 +1,4 @@
 /*
-Copyright 2018 KompiTech GmbH
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -18,28 +17,34 @@ package github
 
 import (
 	"context"
+	"crypto/rand"
 	"encoding/json"
 	"fmt"
-	"math/rand"
+	"math/big"
 	"net/http"
 	"net/url"
+	"strings"
 	"time"
 
-	gh "github.com/google/go-github/v31/github"
+	gh "github.com/google/go-github/v50/github"
 	"github.com/pkg/errors"
 	"go.uber.org/zap"
 	corev1 "k8s.io/api/core/v1"
 
 	"github.com/argoproj/argo-events/common"
 	"github.com/argoproj/argo-events/common/logging"
+	eventsourcecommon "github.com/argoproj/argo-events/eventsources/common"
 	"github.com/argoproj/argo-events/eventsources/common/webhook"
 	"github.com/argoproj/argo-events/pkg/apis/events"
 )
 
-// GitHub headers
 const (
+	// GitHub headers
 	githubEventHeader    = "X-GitHub-Event"
 	githubDeliveryHeader = "X-GitHub-Delivery"
+
+	// Key names in Extras map (payload enrichment flags feature)
+	pullRequestExtrasKey = "pull_request" // holds PR info
 )
 
 // controller controls the webhook operations
@@ -52,17 +57,61 @@ func init() {
 	go webhook.ProcessRouteStatus(controller)
 }
 
-// getCredentials for retrieves credentials for GitHub connection
+// getCredentials retrieves credentials for GitHub connection
 func (router *Router) getCredentials(keySelector *corev1.SecretKeySelector) (*cred, error) {
 	token, err := common.GetSecretFromVolume(keySelector)
 	if err != nil {
-		return nil, errors.Wrap(err, "token not founnd")
+		return nil, fmt.Errorf("secret not found, %w", err)
 	}
+
 	return &cred{
 		secret: token,
 	}, nil
 }
 
+// getAPITokenAuthStrategy returns a TokenAuthStrategy initialised with
+// the GitHub API token provided by the user
+func (router *Router) getAPITokenAuthStrategy() (*TokenAuthStrategy, error) {
+	apiTokenCreds, err := router.getCredentials(router.githubEventSource.APIToken)
+	if err != nil {
+		return nil, fmt.Errorf("failed to retrieve api token credentials, %w", err)
+	}
+
+	return &TokenAuthStrategy{
+		Token: apiTokenCreds.secret,
+	}, nil
+}
+
+// getGithubAppAuthStrategy returns an AppsAuthStrategy initialised with
+// the GitHub App credentials provided by the user
+func (router *Router) getGithubAppAuthStrategy() (*AppsAuthStrategy, error) {
+	appCreds := router.githubEventSource.GithubApp
+	githubAppPrivateKey, err := router.getCredentials(appCreds.PrivateKey)
+	if err != nil {
+		return nil, fmt.Errorf("failed to retrieve github app credentials, %w", err)
+	}
+
+	return &AppsAuthStrategy{
+		AppID:          appCreds.AppID,
+		BaseURL:        router.githubEventSource.GithubBaseURL,
+		InstallationID: appCreds.InstallationID,
+		PrivateKey:     githubAppPrivateKey.secret,
+	}, nil
+}
+
+// chooseAuthStrategy returns an AuthStrategy based on the given credentials
+func (router *Router) chooseAuthStrategy() (AuthStrategy, error) {
+	es := router.githubEventSource
+	switch {
+	case es.HasGithubAPIToken():
+		return router.getAPITokenAuthStrategy()
+	case es.HasGithubAppCreds():
+		return router.getGithubAppAuthStrategy()
+	default:
+		return nil, fmt.Errorf("none of the supported auth options were provided")
+	}
+}
+
 // Implement Router
 // 1.
GetRoute // 2. HandleRoute @@ -74,6 +123,38 @@ func (router *Router) GetRoute() *webhook.Route { return router.route } +func (router *Router) isPRCommentAddedEvent(eventPayload common.Object) bool { + githubEvent := eventPayload[githubEventHeader] + githubAction := eventPayload["action"] + if githubEvent == "issue_comment" && githubAction == "created" { + issueInfo := eventPayload["issue"].(common.Object) + if prInfo, ok := issueInfo["pull_request"]; ok { + if _, ok := prInfo.(common.Object)["url"]; ok { + return true + } + } + } + + return false +} + +func (router *Router) getPRFromPRCommentAddedEvent(eventPayload common.Object) ([]byte, error) { + prNumber := int(eventPayload["issue"].(common.Object)["number"].(float64)) + repoMeta := eventPayload["repository"].(common.Object) + repoOwner := repoMeta["owner"].(common.Object)["login"].(string) + repoName := repoMeta["name"].(string) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + pr, _, err := router.githubClient.PullRequests.Get(ctx, repoOwner, repoName, prNumber) + if err != nil { + return nil, errors.Wrapf(err, "failed to get PR for repo %s/%s", repoOwner, repoName) + } + + return json.Marshal(pr) +} + // HandleRoute handles incoming requests on the route func (router *Router) HandleRoute(writer http.ResponseWriter, request *http.Request) { route := router.route @@ -96,6 +177,7 @@ func (router *Router) HandleRoute(writer http.ResponseWriter, request *http.Requ route.Metrics.EventProcessingDuration(route.EventSourceName, route.EventName, float64(time.Since(start)/time.Millisecond)) }(time.Now()) + request.Body = http.MaxBytesReader(writer, request.Body, route.Context.GetMaxPayloadSize()) body, err := parseValidateRequest(request, []byte(router.hookSecret)) if err != nil { logger.Errorw("request is not valid event notification, discarding it", zap.Error(err)) @@ -103,10 +185,26 @@ func (router *Router) HandleRoute(writer http.ResponseWriter, request *http.Requ return } + extras, err := router.fetchExtras(body) + if err != nil { + logger.Errorw("failed to enrich event payload with additional information", zap.Error(err)) + common.SendErrorResponse(writer, err.Error()) + return + } + + jsonBody, err := json.Marshal(body) + if err != nil { + logger.Info("failed to marshal event body") + common.SendErrorResponse(writer, "invalid event") + route.Metrics.EventProcessingFailed(route.EventSourceName, route.EventName) + return + } + event := &events.GithubEventData{ Headers: request.Header, - Body: (*json.RawMessage)(&body), + Body: (*json.RawMessage)(&jsonBody), Metadata: router.githubEventSource.Metadata, + Extras: extras, } eventBody, err := json.Marshal(event) @@ -124,6 +222,20 @@ func (router *Router) HandleRoute(writer http.ResponseWriter, request *http.Requ common.SendSuccessResponse(writer, "success") } +func (router *Router) fetchExtras(eventPayload common.Object) (map[string]*json.RawMessage, error) { + extras := make(map[string]*json.RawMessage) + if router.githubEventSource.PayloadEnrichment.FetchPROnPRCommentAdded && router.isPRCommentAddedEvent(eventPayload) { + pr, err := router.getPRFromPRCommentAddedEvent(eventPayload) + if err != nil { + return nil, errors.Wrap(err, "failed to fetch PR info for PR comment added event") + } + + extras[pullRequestExtrasKey] = (*json.RawMessage)(&pr) + } + + return extras, nil +} + // PostActivate performs operations once the route is activated and ready to consume requests func (router *Router) PostActivate() error { return nil @@ -135,18 +247,33 
@@ func (router *Router) PostInactivate() error { if githubEventSource.NeedToCreateHooks() && githubEventSource.DeleteHookOnFinish { logger := router.route.Logger - logger.Info("deleting GitHub hook...") + logger.Info("deleting GitHub org hooks...") + + for _, org := range githubEventSource.Organizations { + id, ok := router.orgHookIDs[org] + if !ok { + return fmt.Errorf("can not find hook ID for organization %s", org) + } + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + if _, err := router.githubClient.Organizations.DeleteHook(ctx, org, id); err != nil { + return fmt.Errorf("failed to delete hook for organization %s. err: %w", org, err) + } + logger.Infof("GitHub hook deleted for organization %s", org) + } + + logger.Info("deleting GitHub repo hooks...") for _, r := range githubEventSource.GetOwnedRepositories() { for _, n := range r.Names { - id, ok := router.hookIDs[r.Owner+","+n] + id, ok := router.repoHookIDs[r.Owner+","+n] if !ok { - return errors.Errorf("can not find hook ID for repo %s/%s", r.Owner, n) + return fmt.Errorf("can not find hook ID for repo %s/%s", r.Owner, n) } ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() if _, err := router.githubClient.Repositories.DeleteHook(ctx, r.Owner, n, id); err != nil { - return errors.Errorf("failed to delete hook for repo %s/%s. err: %+v", r.Owner, n, err) + return fmt.Errorf("failed to delete hook for repo %s/%s. err: %w", r.Owner, n, err) } logger.Infof("GitHub hook deleted for repo %s/%s", r.Owner, n) } @@ -156,7 +283,7 @@ func (router *Router) PostInactivate() error { } // StartListening starts an event source -func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte) error) error { +func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error) error { logger := logging.FromContext(ctx). With(logging.LabelEventSourceType, el.GetEventSourceType(), logging.LabelEventName, el.GetEventName()) logger.Info("started processing the Github event source...") @@ -171,7 +298,7 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt if githubEventSource.WebhookSecret != nil { webhookSecretCreds, err := router.getCredentials(githubEventSource.WebhookSecret) if err != nil { - return errors.Errorf("failed to retrieve webhook secret. err: %+v", err) + return fmt.Errorf("failed to retrieve webhook secret. err: %w", err) } router.hookSecret = webhookSecretCreds.secret } @@ -180,21 +307,32 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt // create webhooks // In order to successfully setup a GitHub hook for the given repository, - // 1. Get the API Token and Webhook secret from K8s secrets - // 2. Configure the hook with url, content type, ssl etc. - // 3. Set up a GitHub client - // 4. Set the base and upload url for the client - // 5. Create the hook if one doesn't exist already. If exists already, then use that one. - - logger.Info("retrieving api token credentials...") - apiTokenCreds, err := router.getCredentials(githubEventSource.APIToken) + // 1. Parse and validate base and upload url if provided + // 2. Get the GitHub auth credentials and Webhook secret from K8s secrets + // 3. Configure the hook with url, content type, ssl etc. + // 4. Set up a GitHub client + // 5. Set the base and upload url for the client + // 6. Create the hook if one doesn't exist already. If exists already, then use that one. 
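// ---- editor's aside (illustrative, not part of this change) ----
// Steps 1-5 of the comment above, condensed into a hedged sketch.
// newEnterpriseClient is a hypothetical name; it leans on parseUrlWithSlash
// (defined at the bottom of this file) and chooseAuthStrategy (defined
// above), and error handling is shortened for brevity.
func (router *Router) newEnterpriseClient() (*gh.Client, error) {
	baseURL, err := parseUrlWithSlash(&router.githubEventSource.GithubBaseURL)
	if err != nil {
		return nil, err
	}
	uploadURL, err := parseUrlWithSlash(&router.githubEventSource.GithubUploadURL)
	if err != nil {
		return nil, err
	}
	strategy, err := router.chooseAuthStrategy()
	if err != nil {
		return nil, err
	}
	rt, err := strategy.AuthTransport()
	if err != nil {
		return nil, err
	}
	client := gh.NewClient(&http.Client{Transport: rt})
	// go-github expects Enterprise base/upload URLs to end with "/", which
	// parseUrlWithSlash guarantees; both stay nil for plain github.com.
	if baseURL != nil && uploadURL != nil {
		client.BaseURL = baseURL
		client.UploadURL = uploadURL
	}
	return client, nil
}
// ---- end editor's aside ----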
+ + baseURL, err := parseUrlWithSlash(&githubEventSource.GithubBaseURL) if err != nil { - return errors.Errorf("failed to retrieve api token credentials. err: %+v", err) + return fmt.Errorf("failed to parse github base url. err: %v", err) + } + uploadURL, err := parseUrlWithSlash(&githubEventSource.GithubUploadURL) + if err != nil { + return fmt.Errorf("failed to parse github upload url. err: %v", err) } - logger.Info("setting up auth with api token...") - PATTransport := TokenAuthTransport{ - Token: apiTokenCreds.secret, + logger.Info("choosing github auth strategy...") + authStrategy, err := router.chooseAuthStrategy() + if err != nil { + return fmt.Errorf("failed to get github auth strategy, %w", err) + } + + logger.Info("setting up auth transport for http client with the chosen strategy...") + authTransport, err := authStrategy.AuthTransport() + if err != nil { + return fmt.Errorf("failed to set up auth transport for http client, %w", err) } logger.Info("configuring GitHub hook...") @@ -215,27 +353,17 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt } logger.Info("setting up client for GitHub...") - client := gh.NewClient(PATTransport.Client()) - - logger.Info("setting up base url for GitHub client...") - if githubEventSource.GithubBaseURL != "" { - baseURL, err := url.Parse(githubEventSource.GithubBaseURL) - if err != nil { - return fmt.Errorf("failed to parse github base url. err: %v", err) - } + client := gh.NewClient(&http.Client{Transport: authTransport}) + if baseURL != nil && uploadURL != nil { + logger.Info("setting up client for GitHub Enterprise...") client.BaseURL = baseURL - } - - logger.Info("setting up the upload url for GitHub client...") - if githubEventSource.GithubUploadURL != "" { - uploadURL, err := url.Parse(githubEventSource.GithubUploadURL) - if err != nil { - return fmt.Errorf("failed to parse github upload url. err: %v", err) - } client.UploadURL = uploadURL } + logger.Infof("client set for baseURL=[%s] uploadURL=[%s]", client.BaseURL, client.UploadURL) + router.githubClient = client - router.hookIDs = make(map[string]int64) + router.repoHookIDs = make(map[string]int64) + router.orgHookIDs = make(map[string]int64) hook := &gh.Hook{ Events: githubEventSource.Events, @@ -247,6 +375,27 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt defer cancel() f := func() { + for _, org := range githubEventSource.Organizations { + hooks, _, err := router.githubClient.Organizations.ListHooks(ctx, org, nil) + if err != nil { + logger.Errorf("failed to list existing webhooks of organization %s. err: %+v", org, err) + continue + } + h := getHook(hooks, formattedURL, githubEventSource.Events) + if h != nil { + router.orgHookIDs[org] = *h.ID + continue + } + logger.Infof("hook not found for organization %s, creating ...", org) + h, _, err = router.githubClient.Organizations.CreateHook(ctx, org, hook) + if err != nil { + logger.Errorf("failed to create github webhook for organization %s. 
err: %+v", org, err) + continue + } + router.orgHookIDs[org] = *h.ID + time.Sleep(500 * time.Millisecond) + } + for _, r := range githubEventSource.GetOwnedRepositories() { for _, name := range r.Names { hooks, _, err := router.githubClient.Repositories.ListHooks(ctx, r.Owner, name, nil) @@ -256,7 +405,7 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt } h := getHook(hooks, formattedURL, githubEventSource.Events) if h != nil { - router.hookIDs[r.Owner+","+name] = *h.ID + router.repoHookIDs[r.Owner+","+name] = *h.ID continue } logger.Infof("hook not found for %s/%s, creating ...", r.Owner, name) @@ -265,36 +414,29 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt logger.Errorf("failed to create github webhook for %s/%s. err: %+v", r.Owner, name, err) continue } - router.hookIDs[r.Owner+","+name] = *h.ID + router.repoHookIDs[r.Owner+","+name] = *h.ID time.Sleep(500 * time.Millisecond) } } } - // Github can not handle race condtions well - it might create multiple hooks with same config + // Github can not handle race conditions well - it might create multiple hooks with same config // when replicas > 1 // Randomly sleep some time to mitigate the issue. - s1 := rand.NewSource(time.Now().UnixNano()) - r1 := rand.New(s1) - time.Sleep(time.Duration(r1.Intn(2000)) * time.Millisecond) + randomNum, _ := rand.Int(rand.Reader, big.NewInt(int64(2000))) + time.Sleep(time.Duration(randomNum.Int64()) * time.Millisecond) f() go func() { // Another kind of race conditions might happen when pods do rolling upgrade - new pod starts // and old pod terminates, if DeleteHookOnFinish is true, the hook will be deleted from github. - // This is a workround to mitigate the race conditions. + // This is a workaround to mitigate the race conditions. 
logger.Info("starting github hooks manager daemon") - ticker := time.NewTicker(60 * time.Second) - defer ticker.Stop() - for { - select { - case <-ctx.Done(): - logger.Info("exiting github hooks manager daemon") - return - case <-ticker.C: - f() - } + for i := 0; i < 10; i++ { + time.Sleep(60 * time.Second) + f() } + logger.Info("exiting github hooks manager daemon") }() } else { logger.Info("no need to create webhooks") @@ -304,7 +446,7 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt } // parseValidateRequest parses a http request and checks if it is valid GitHub notification -func parseValidateRequest(r *http.Request, secret []byte) ([]byte, error) { +func parseValidateRequest(r *http.Request, secret []byte) (map[string]interface{}, error) { body, err := gh.ValidatePayload(r, secret) if err != nil { return nil, err @@ -320,5 +462,16 @@ func parseValidateRequest(r *http.Request, secret []byte) ([]byte, error) { } { payload[h] = r.Header.Get(h) } - return json.Marshal(payload) + return payload, nil +} + +// parseUrlWithSlash parses URL and enforces trailing slash expected by GitHub client +func parseUrlWithSlash(urlStr *string) (*url.URL, error) { + if *urlStr == "" { + return nil, nil + } + if !strings.HasSuffix(*urlStr, "/") { + *urlStr += "/" + } + return url.Parse(*urlStr) } diff --git a/eventsources/sources/github/start_test.go b/eventsources/sources/github/start_test.go index 5cfd1a9a31..92b630c25f 100644 --- a/eventsources/sources/github/start_test.go +++ b/eventsources/sources/github/start_test.go @@ -18,8 +18,7 @@ package github import ( "bytes" - "encoding/json" - "io/ioutil" + "io" "net/http" "testing" @@ -74,7 +73,7 @@ func TestRouteActiveHandler(t *testing.T) { convey.So(err, convey.ShouldBeNil) router.HandleRoute(writer, &http.Request{ - Body: ioutil.NopCloser(bytes.NewReader(body)), + Body: io.NopCloser(bytes.NewReader(body)), }) convey.So(writer.HeaderStatus, convey.ShouldEqual, http.StatusBadRequest) @@ -82,7 +81,7 @@ func TestRouteActiveHandler(t *testing.T) { route.Active = true router.HandleRoute(writer, &http.Request{ - Body: ioutil.NopCloser(bytes.NewReader(body)), + Body: io.NopCloser(bytes.NewReader(body)), }) convey.So(writer.HeaderStatus, convey.ShouldEqual, http.StatusBadRequest) @@ -121,7 +120,7 @@ func TestRouteActiveHandlerDeprecated(t *testing.T) { convey.So(err, convey.ShouldBeNil) router.HandleRoute(writer, &http.Request{ - Body: ioutil.NopCloser(bytes.NewReader(body)), + Body: io.NopCloser(bytes.NewReader(body)), }) convey.So(writer.HeaderStatus, convey.ShouldEqual, http.StatusBadRequest) @@ -129,7 +128,7 @@ func TestRouteActiveHandlerDeprecated(t *testing.T) { route.Active = true router.HandleRoute(writer, &http.Request{ - Body: ioutil.NopCloser(bytes.NewReader(body)), + Body: io.NopCloser(bytes.NewReader(body)), }) convey.So(writer.HeaderStatus, convey.ShouldEqual, http.StatusBadRequest) @@ -154,12 +153,10 @@ func TestAddEventTypeBody(t *testing.T) { convey.Convey("Delivery headers should be written to message", func() { body, err := parseValidateRequest(request, []byte{}) convey.So(err, convey.ShouldBeNil) - payload := make(map[string]interface{}) - err = json.Unmarshal(body, &payload) convey.So(err, convey.ShouldBeNil) convey.So(err, convey.ShouldBeNil) - convey.So(payload["X-GitHub-Event"], convey.ShouldEqual, eventType) - convey.So(payload["X-GitHub-Delivery"], convey.ShouldEqual, deliveryID) + convey.So(body["X-GitHub-Event"], convey.ShouldEqual, eventType) + convey.So(body["X-GitHub-Delivery"], 
convey.ShouldEqual, deliveryID)
 		})
 	})
 }
diff --git a/eventsources/sources/github/tokenauth.go b/eventsources/sources/github/tokenauth.go
index 41571606d9..5e29fcd9ba 100644
--- a/eventsources/sources/github/tokenauth.go
+++ b/eventsources/sources/github/tokenauth.go
@@ -1,5 +1,5 @@
 /*
-Copyright 2018 KompiTech GmbH
+
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
@@ -15,13 +15,13 @@ package github

 import "net/http"

-type TokenAuthTransport struct {
+type TokenAuthStrategy struct {
 	Token     string
 	Transport http.RoundTripper
 }

 // RoundTrip implements the RoundTripper interface.
-func (t *TokenAuthTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+func (t *TokenAuthStrategy) RoundTrip(req *http.Request) (*http.Response, error) {
 	// To set extra headers, we must make a copy of the Request so
 	// that we don't modify the Request we were given. This is required by the
 	// specification of http.RoundTripper.
@@ -39,15 +39,15 @@ func (t *TokenAuthTransport) RoundTrip(req *http.Request) (*http.Response, error
 	return t.transport().RoundTrip(req2)
 }

-// Client returns an *http.Client that makes requests that are authenticated
-// using HTTP Basic Authentication.
-func (t *TokenAuthTransport) Client() *http.Client {
-	return &http.Client{Transport: t}
+// AuthTransport implements the AuthStrategy interface.
+func (t *TokenAuthStrategy) AuthTransport() (http.RoundTripper, error) {
+	return t, nil
 }

-func (t *TokenAuthTransport) transport() http.RoundTripper {
+func (t *TokenAuthStrategy) transport() http.RoundTripper {
 	if t.Transport != nil {
 		return t.Transport
 	}
+
 	return http.DefaultTransport
 }
diff --git a/eventsources/sources/github/types.go b/eventsources/sources/github/types.go
index 0d7879c3e3..948865c132 100644
--- a/eventsources/sources/github/types.go
+++ b/eventsources/sources/github/types.go
@@ -1,5 +1,4 @@
 /*
-Copyright 2018 KompiTech GmbH
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -17,10 +16,12 @@ limitations under the License.
 package github

 import (
-	"github.com/google/go-github/v31/github"
+	"net/http"
+
+	"github.com/google/go-github/v50/github"

 	"github.com/argoproj/argo-events/eventsources/common/webhook"
-	metrics "github.com/argoproj/argo-events/metrics"
+	"github.com/argoproj/argo-events/metrics"
 	apicommon "github.com/argoproj/argo-events/pkg/apis/common"
 	"github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1"
 )
@@ -57,7 +58,9 @@ type Router struct {
 	// githubClient is the client to connect to GitHub
 	githubClient *github.Client
 	// (owner + "," + repo name) -> hook ID
-	hookIDs map[string]int64
+	repoHookIDs map[string]int64
+	// org name -> hook ID
+	orgHookIDs map[string]int64
 	// hookSecret is a GitHub webhook secret
 	hookSecret string
 }
@@ -66,3 +69,10 @@ type Router struct {
 // cred stores the api access token
 type cred struct {
 	secret string
 }
+
+// AuthStrategy is implemented by the different GitHub auth strategies that are supported
+type AuthStrategy interface {
+	// AuthTransport returns an http.RoundTripper that is used with an http.Client to make
+	// authenticated requests using the strategy's credentials.
+ AuthTransport() (http.RoundTripper, error) +} diff --git a/eventsources/sources/github/validate.go b/eventsources/sources/github/validate.go index 6f51311cd7..22243fe618 100644 --- a/eventsources/sources/github/validate.go +++ b/eventsources/sources/github/validate.go @@ -1,5 +1,5 @@ /* -Copyright 2018 KompiTech GmbH + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at @@ -31,8 +31,11 @@ func validate(githubEventSource *v1alpha1.GithubEventSource) error { if githubEventSource == nil { return common.ErrNilEventSource } - if githubEventSource.GetOwnedRepositories() == nil { - return fmt.Errorf("no valid repository owner and name found") + if githubEventSource.GetOwnedRepositories() == nil && githubEventSource.Organizations == nil { + return fmt.Errorf("either repositories or organizations is required") + } + if githubEventSource.GetOwnedRepositories() != nil && githubEventSource.Organizations != nil { + return fmt.Errorf("only one of repositories and organizations is allowed") } if githubEventSource.NeedToCreateHooks() && len(githubEventSource.Events) == 0 { return fmt.Errorf("events must be defined to create a github webhook") @@ -43,5 +46,16 @@ func validate(githubEventSource *v1alpha1.GithubEventSource) error { return fmt.Errorf("content type must be \"json\" or \"form\"") } } + + // in order to avoid requests ending accidentally to public GitHub, + // make sure that both are set if either one is provided + if githubEventSource.GithubBaseURL != "" || githubEventSource.GithubUploadURL != "" { + if githubEventSource.GithubBaseURL == "" { + return fmt.Errorf("githubBaseURL is required when githubUploadURL is set") + } + if githubEventSource.GithubUploadURL == "" { + return fmt.Errorf("githubUploadURL is required when githubBaseURL is set") + } + } return webhook.ValidateWebhookContext(githubEventSource.Webhook) } diff --git a/eventsources/sources/github/validate_test.go b/eventsources/sources/github/validate_test.go index ef9517bba5..9062a93add 100644 --- a/eventsources/sources/github/validate_test.go +++ b/eventsources/sources/github/validate_test.go @@ -19,7 +19,7 @@ package github import ( "context" "fmt" - "io/ioutil" + "os" "testing" "github.com/argoproj/argo-events/eventsources/sources" @@ -30,12 +30,11 @@ import ( func TestValidateEventSource(t *testing.T) { listener := &EventListener{} - err := listener.ValidateEventSource(context.Background()) assert.Error(t, err) - assert.Equal(t, "no valid repository owner and name found", err.Error()) + assert.Equal(t, "either repositories or organizations is required", err.Error()) - content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "github.yaml")) + content, err := os.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "github.yaml")) assert.Nil(t, err) var eventSource *v1alpha1.EventSource diff --git a/eventsources/sources/gitlab/hook_util.go b/eventsources/sources/gitlab/hook_util.go new file mode 100644 index 0000000000..4e0a260c0a --- /dev/null +++ b/eventsources/sources/gitlab/hook_util.go @@ -0,0 +1,25 @@ +package gitlab + +import ( + "github.com/xanzy/go-gitlab" +) + +func getProjectHook(hooks []*gitlab.ProjectHook, url string) *gitlab.ProjectHook { + for _, h := range hooks { + if h.URL != url { + continue + } + return h + } + return nil +} + +func getGroupHook(hooks []*gitlab.GroupHook, url string) *gitlab.GroupHook { + for _, h := range hooks { + if h.URL != url { + continue + } + return 
h + } + return nil +} diff --git a/eventsources/sources/gitlab/hook_util_test.go b/eventsources/sources/gitlab/hook_util_test.go new file mode 100644 index 0000000000..0d8bdf280a --- /dev/null +++ b/eventsources/sources/gitlab/hook_util_test.go @@ -0,0 +1,36 @@ +package gitlab + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/xanzy/go-gitlab" +) + +func TestGetGroupHook(t *testing.T) { + hooks := []*gitlab.GroupHook{ + { + URL: "https://example0.com/", + }, + { + URL: "https://example1.com/", + }, + } + + assert.Equal(t, hooks[1], getGroupHook(hooks, "https://example1.com/")) + assert.Nil(t, getGroupHook(hooks, "https://example.com/")) +} + +func TestGetProjectHook(t *testing.T) { + hooks := []*gitlab.ProjectHook{ + { + URL: "https://example0.com/", + }, + { + URL: "https://example1.com/", + }, + } + + assert.Equal(t, hooks[1], getProjectHook(hooks, "https://example1.com/")) + assert.Nil(t, getProjectHook(hooks, "https://example.com/")) +} diff --git a/eventsources/sources/gitlab/start.go b/eventsources/sources/gitlab/start.go index c5089f251f..5c1fac493f 100644 --- a/eventsources/sources/gitlab/start.go +++ b/eventsources/sources/gitlab/start.go @@ -18,21 +18,23 @@ package gitlab import ( "context" + "crypto/rand" "encoding/json" - "io/ioutil" + "fmt" + "io" + "math/big" "net/http" "reflect" "time" "github.com/argoproj/argo-events/common" "github.com/argoproj/argo-events/common/logging" + eventsourcecommon "github.com/argoproj/argo-events/eventsources/common" "github.com/argoproj/argo-events/eventsources/common/webhook" "github.com/argoproj/argo-events/eventsources/sources" "github.com/argoproj/argo-events/pkg/apis/events" - "github.com/pkg/errors" "github.com/xanzy/go-gitlab" "go.uber.org/zap" - corev1 "k8s.io/api/core/v1" ) // controller controls the webhook operations @@ -45,17 +47,6 @@ func init() { go webhook.ProcessRouteStatus(controller) } -// getCredentials retrieves credentials to connect to GitLab -func (router *Router) getCredentials(keySelector *corev1.SecretKeySelector) (*cred, error) { - token, err := common.GetSecretFromVolume(keySelector) - if err != nil { - return nil, errors.Wrap(err, "token not founnd") - } - return &cred{ - token: token, - }, nil -} - // Implement Router // 1. GetRoute // 2. HandleRoute @@ -89,7 +80,14 @@ func (router *Router) HandleRoute(writer http.ResponseWriter, request *http.Requ route.Metrics.EventProcessingDuration(route.EventSourceName, route.EventName, float64(time.Since(start)/time.Millisecond)) }(time.Now()) - body, err := ioutil.ReadAll(request.Body) + if router.secretToken != "" { + if t := request.Header.Get("X-Gitlab-Token"); t != router.secretToken { + common.SendErrorResponse(writer, "token mismatch") + return + } + } + request.Body = http.MaxBytesReader(writer, request.Body, route.Context.GetMaxPayloadSize()) + body, err := io.ReadAll(request.Body) if err != nil { logger.Errorw("failed to parse request body", zap.Error(err)) common.SendErrorResponse(writer, err.Error()) @@ -120,164 +118,204 @@ func (router *Router) HandleRoute(writer http.ResponseWriter, request *http.Requ // PostActivate performs operations once the route is activated and ready to consume requests func (router *Router) PostActivate() error { - route := router.GetRoute() - gitlabEventSource := router.gitlabEventSource - - // In order to set up a hook for the GitLab project, - // 1. Get the API access token for client - // 2. Set up GitLab client - // 3. Configure Hook with given event type - // 4. 
Create project hook - - logger := route.Logger.With( - logging.LabelEndpoint, route.Context.Endpoint, - logging.LabelPort, route.Context.Port, - logging.LabelHTTPMethod, route.Context.Method, - "project-id", gitlabEventSource.ProjectID, - ) - - logger.Info("retrieving the access token credentials...") - c, err := router.getCredentials(gitlabEventSource.AccessToken) - if err != nil { - return errors.Errorf("failed to get gitlab credentials. err: %+v", err) - } + return nil +} - logger.Info("setting up the client to connect to GitLab...") - router.gitlabClient, err = gitlab.NewClient(c.token, gitlab.WithBaseURL(gitlabEventSource.GitlabBaseURL)) - if err != nil { - return errors.Wrapf(err, "failed to initialize client") +// PostInactivate performs operations after the route is inactivated +func (router *Router) PostInactivate() error { + gitlabEventSource := router.gitlabEventSource + if !gitlabEventSource.NeedToCreateHooks() || !gitlabEventSource.DeleteHookOnFinish { + return nil } - formattedURL := common.FormattedURL(gitlabEventSource.Webhook.URL, gitlabEventSource.Webhook.Endpoint) + logger := router.route.Logger + logger.Info("deleting Gitlab hooks...") - hooks, _, err := router.gitlabClient.Projects.ListProjectHooks(gitlabEventSource.ProjectID, &gitlab.ListProjectHooksOptions{}) - if err != nil { - return errors.Wrapf(err, "failed to list existing hooks to check for duplicates for project id %s", router.gitlabEventSource.ProjectID) + for _, g := range gitlabEventSource.GetGroups() { + id, ok := router.groupHookIDs[g] + if !ok { + return fmt.Errorf("can not find hook ID for group %s", g) + } + if _, err := router.gitlabClient.Groups.DeleteGroupHook(g, id); err != nil { + return fmt.Errorf("failed to delete hook for group %s. err: %w", g, err) + } + logger.Infof("Gitlab hook deleted for group %s", g) } - var existingHook *gitlab.ProjectHook - isAlreadyExists := false - - for _, hook := range hooks { - if hook.URL == formattedURL { - existingHook = hook - isAlreadyExists = true + for _, p := range gitlabEventSource.GetProjects() { + id, ok := router.projectHookIDs[p] + if !ok { + return fmt.Errorf("can not find hook ID for project %s", p) + } + if _, err := router.gitlabClient.Projects.DeleteProjectHook(p, id); err != nil { + return fmt.Errorf("failed to delete hook for project %s. err: %w", p, err) } + logger.Infof("Gitlab hook deleted for project %s", p) } + return nil +} - defaultEventValue := false - - editOpt := &gitlab.EditProjectHookOptions{ - URL: &formattedURL, - ConfidentialNoteEvents: &defaultEventValue, - PushEvents: &defaultEventValue, - IssuesEvents: &defaultEventValue, - ConfidentialIssuesEvents: &defaultEventValue, - MergeRequestsEvents: &defaultEventValue, - TagPushEvents: &defaultEventValue, - NoteEvents: &defaultEventValue, - JobEvents: &defaultEventValue, - PipelineEvents: &defaultEventValue, - WikiPageEvents: &defaultEventValue, - EnableSSLVerification: &router.gitlabEventSource.EnableSSLVerification, - Token: &c.token, - } +// StartListening starts an event source +func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error) error { + logger := logging.FromContext(ctx). 
+ With(logging.LabelEventSourceType, el.GetEventSourceType(), logging.LabelEventName, el.GetEventName()) + logger.Info("started processing the Gitlab event source...") - addOpt := &gitlab.AddProjectHookOptions{ - URL: &formattedURL, - Token: &c.token, - EnableSSLVerification: &router.gitlabEventSource.EnableSSLVerification, - ConfidentialNoteEvents: &defaultEventValue, - PushEvents: &defaultEventValue, - IssuesEvents: &defaultEventValue, - ConfidentialIssuesEvents: &defaultEventValue, - MergeRequestsEvents: &defaultEventValue, - TagPushEvents: &defaultEventValue, - NoteEvents: &defaultEventValue, - JobEvents: &defaultEventValue, - PipelineEvents: &defaultEventValue, - WikiPageEvents: &defaultEventValue, - } + defer sources.Recover(el.GetEventName()) - var opt interface{} + gitlabEventSource := &el.GitlabEventSource - opt = addOpt - if isAlreadyExists { - opt = editOpt + route := webhook.NewRoute(gitlabEventSource.Webhook, logger, el.GetEventSourceName(), el.GetEventName(), el.Metrics) + router := &Router{ + route: route, + gitlabEventSource: gitlabEventSource, + projectHookIDs: make(map[string]int), + groupHookIDs: make(map[string]int), } - logger.Info("configuring the GitLab events for the hook...") - - for _, event := range gitlabEventSource.Events { - elem := reflect.ValueOf(opt).Elem().FieldByName(event) - if ok := elem.IsValid(); !ok { - return errors.Errorf("unknown event %s", event) + if gitlabEventSource.NeedToCreateHooks() { + // In order to set up a hook for the GitLab project, + // 1. Get the API access token for client + // 2. Set up GitLab client + // 3. Configure Hook with given event type + // 4. Create project hook + + logger.Info("retrieving the access token credentials...") + + defaultEventValue := false + formattedURL := common.FormattedURL(gitlabEventSource.Webhook.URL, gitlabEventSource.Webhook.Endpoint) + opt := &gitlab.AddProjectHookOptions{ + URL: &formattedURL, + EnableSSLVerification: &router.gitlabEventSource.EnableSSLVerification, + ConfidentialNoteEvents: &defaultEventValue, + PushEvents: &defaultEventValue, + IssuesEvents: &defaultEventValue, + ConfidentialIssuesEvents: &defaultEventValue, + MergeRequestsEvents: &defaultEventValue, + TagPushEvents: &defaultEventValue, + NoteEvents: &defaultEventValue, + JobEvents: &defaultEventValue, + PipelineEvents: &defaultEventValue, + WikiPageEvents: &defaultEventValue, } - iev := reflect.New(elem.Type().Elem()) - reflect.Indirect(iev).SetBool(true) - elem.Set(iev) - } + for _, event := range gitlabEventSource.Events { + elem := reflect.ValueOf(opt).Elem().FieldByName(event) + if ok := elem.IsValid(); !ok { + return fmt.Errorf("unknown event %s", event) + } + iev := reflect.New(elem.Type().Elem()) + reflect.Indirect(iev).SetBool(true) + elem.Set(iev) + } + groupHookOpt := &gitlab.AddGroupHookOptions{ + URL: opt.URL, + EnableSSLVerification: opt.EnableSSLVerification, + ConfidentialNoteEvents: opt.ConfidentialNoteEvents, + PushEvents: opt.PushEvents, + IssuesEvents: opt.IssuesEvents, + ConfidentialIssuesEvents: opt.ConfidentialIssuesEvents, + MergeRequestsEvents: opt.MergeRequestsEvents, + TagPushEvents: opt.TagPushEvents, + NoteEvents: opt.NoteEvents, + JobEvents: opt.JobEvents, + PipelineEvents: opt.PipelineEvents, + WikiPageEvents: opt.WikiPageEvents, + } - var newHook *gitlab.ProjectHook + if gitlabEventSource.SecretToken != nil { + token, err := common.GetSecretFromVolume(gitlabEventSource.SecretToken) + if err != nil { + return fmt.Errorf("failed to retrieve secret token. 
err: %w", err) + } + opt.Token = &token + groupHookOpt.Token = &token + router.secretToken = token + } - if !isAlreadyExists { - logger.Info("creating project hook...") - newHook, _, err = router.gitlabClient.Projects.AddProjectHook(router.gitlabEventSource.ProjectID, opt.(*gitlab.AddProjectHookOptions)) + accessToken, err := common.GetSecretFromVolume(gitlabEventSource.AccessToken) if err != nil { - return errors.Errorf("failed to add project hook. err: %+v", err) + return fmt.Errorf("failed to get gitlab credentials. err: %w", err) } - } else { - logger.Info("project hook already exists, updating it...") - if existingHook == nil { - return errors.Errorf("existing hook contents are empty, unable to edit existing webhook") - } - newHook, _, err = router.gitlabClient.Projects.EditProjectHook(router.gitlabEventSource.ProjectID, existingHook.ID, opt.(*gitlab.EditProjectHookOptions)) + + logger.Info("setting up the client to connect to GitLab...") + router.gitlabClient, err = gitlab.NewClient(accessToken, gitlab.WithBaseURL(gitlabEventSource.GitlabBaseURL)) if err != nil { - return errors.Errorf("failed to add project hook. err: %+v", err) + return fmt.Errorf("failed to initialize client, %w", err) } - } - router.hook = newHook - logger.With("hook-id", newHook.ID).Info("hook registered for the project") - return nil -} - -// PostInactivate performs operations after the route is inactivated -func (router *Router) PostInactivate() error { - gitlabEventSource := router.gitlabEventSource - route := router.route - - if gitlabEventSource.DeleteHookOnFinish { - logger := route.Logger.With( - "project-id", gitlabEventSource.ProjectID, - "hook-id", router.hook.ID, - ) - - logger.Info("deleting project hook...") - if _, err := router.gitlabClient.Projects.DeleteProjectHook(router.gitlabEventSource.ProjectID, router.hook.ID); err != nil { - return errors.Errorf("failed to delete hook. err: %+v", err) + f := func() { + for _, g := range gitlabEventSource.GetGroups() { + hooks, _, err := router.gitlabClient.Groups.ListGroupHooks(g, &gitlab.ListGroupHooksOptions{}) + if err != nil { + logger.Errorf("failed to list existing webhooks of group %s. err: %+v", g, err) + continue + } + hook := getGroupHook(hooks, formattedURL) + if hook != nil { + router.groupHookIDs[g] = hook.ID + continue + } + logger.Infof("hook not found for group %s, creating ...", g) + hook, _, err = router.gitlabClient.Groups.AddGroupHook(g, groupHookOpt) + if err != nil { + logger.Errorf("failed to create gitlab webhook for group %s. err: %+v", g, err) + continue + } + router.groupHookIDs[g] = hook.ID + time.Sleep(500 * time.Millisecond) + } + + for _, p := range gitlabEventSource.GetProjects() { + hooks, _, err := router.gitlabClient.Projects.ListProjectHooks(p, &gitlab.ListProjectHooksOptions{}) + if err != nil { + logger.Errorf("failed to list existing webhooks of project %s. err: %+v", p, err) + continue + } + hook := getProjectHook(hooks, formattedURL) + if hook != nil { + router.projectHookIDs[p] = hook.ID + continue + } + logger.Infof("hook not found for project %s, creating ...", p) + hook, _, err = router.gitlabClient.Projects.AddProjectHook(p, opt) + if err != nil { + logger.Errorf("failed to create gitlab webhook for project %s. 
err: %+v", p, err) + continue + } + router.projectHookIDs[p] = hook.ID + time.Sleep(500 * time.Millisecond) + } } - logger.Info("gitlab hook deleted") + // Mitigate race condtions - it might create multiple hooks with same config when replicas > 1 + randomNum, _ := rand.Int(rand.Reader, big.NewInt(int64(2000))) + time.Sleep(time.Duration(randomNum.Int64()) * time.Millisecond) + f() + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + go func() { + // Another kind of race conditions might happen when pods do rolling upgrade - new pod starts + // and old pod terminates, if DeleteHookOnFinish is true, the hook will be deleted from gitlab. + // This is a workround to mitigate the race conditions. + logger.Info("starting gitlab hooks manager daemon") + ticker := time.NewTicker(60 * time.Second) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + logger.Info("exiting gitlab hooks manager daemon") + return + case <-ticker.C: + f() + } + } + }() + } else { + logger.Info("no need to create webhooks") } - return nil -} - -// StartListening starts an event source -func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte) error) error { - logger := logging.FromContext(ctx). - With(logging.LabelEventSourceType, el.GetEventSourceType(), logging.LabelEventName, el.GetEventName()) - logger.Info("started processing the Gitlab event source...") - - defer sources.Recover(el.GetEventName()) - - gitlabEventSource := &el.GitlabEventSource - route := webhook.NewRoute(gitlabEventSource.Webhook, logger, el.GetEventSourceName(), el.GetEventName(), el.Metrics) - - return webhook.ManageRoute(ctx, &Router{ - route: route, - gitlabEventSource: gitlabEventSource, - }, controller, dispatch) + return webhook.ManageRoute(ctx, router, controller, dispatch) } diff --git a/eventsources/sources/gitlab/types.go b/eventsources/sources/gitlab/types.go index 4b5544ab71..d2622cec4a 100644 --- a/eventsources/sources/gitlab/types.go +++ b/eventsources/sources/gitlab/types.go @@ -54,16 +54,12 @@ type Router struct { route *webhook.Route // gitlabClient is the client to connect to GitLab gitlabClient *gitlab.Client - // hook is gitlab project hook - // GitLab API docs: - // https://docs.gitlab.com/ce/api/projects.html#list-project-hooks - hook *gitlab.ProjectHook + // projectID -> hook ID + projectHookIDs map[string]int + // groupID -> hook ID + groupHookIDs map[string]int // gitlabEventSource is the event source that contains configuration necessary to consume events from GitLab gitlabEventSource *v1alpha1.GitlabEventSource -} - -// cred stores the api access token -type cred struct { - // token is gitlab api access token - token string + // gitlab webhook secret token + secretToken string } diff --git a/eventsources/sources/gitlab/validate.go b/eventsources/sources/gitlab/validate.go index a2c8045e48..7c04bbf08b 100644 --- a/eventsources/sources/gitlab/validate.go +++ b/eventsources/sources/gitlab/validate.go @@ -31,8 +31,8 @@ func validate(eventSource *v1alpha1.GitlabEventSource) error { if eventSource == nil { return common.ErrNilEventSource } - if eventSource.ProjectID == "" { - return fmt.Errorf("project id can't be empty") + if len(eventSource.GetProjects()) == 0 && len(eventSource.GetGroups()) == 0 { + return fmt.Errorf("projects and groups cannot be empty at the same time") } if eventSource.Events == nil { return fmt.Errorf("events can't be empty") diff --git a/eventsources/sources/gitlab/validate_test.go b/eventsources/sources/gitlab/validate_test.go index a372821cca..3ba741474e 
100644 --- a/eventsources/sources/gitlab/validate_test.go +++ b/eventsources/sources/gitlab/validate_test.go @@ -19,7 +19,7 @@ package gitlab import ( "context" "fmt" - "io/ioutil" + "os" "testing" "github.com/ghodss/yaml" @@ -34,9 +34,9 @@ func TestValidateEventSource(t *testing.T) { err := listener.ValidateEventSource(context.Background()) assert.Error(t, err) - assert.Equal(t, "project id can't be empty", err.Error()) + assert.Equal(t, "projects and groups cannot be empty at the same time", err.Error()) - content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "gitlab.yaml")) + content, err := os.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "gitlab.yaml")) assert.Nil(t, err) var eventSource *v1alpha1.EventSource diff --git a/eventsources/sources/hdfs/client.go b/eventsources/sources/hdfs/client.go index 9de76d58ab..c1bce92fb0 100644 --- a/eventsources/sources/hdfs/client.go +++ b/eventsources/sources/hdfs/client.go @@ -4,7 +4,6 @@ import ( "fmt" "github.com/colinmarc/hdfs" - "github.com/pkg/errors" krb "gopkg.in/jcmturner/gokrb5.v5/client" "gopkg.in/jcmturner/gokrb5.v5/config" "gopkg.in/jcmturner/gokrb5.v5/credentials" @@ -45,7 +44,7 @@ type KeytabOptions struct { func getConfigMapKey(selector *corev1.ConfigMapKeySelector) (string, error) { result, err := common.GetConfigMapFromVolume(selector) if err != nil { - return "", errors.Wrap(err, "configmap value not injected") + return "", fmt.Errorf("configmap value not injected, %w", err) } return result, nil } @@ -53,7 +52,7 @@ func getConfigMapKey(selector *corev1.ConfigMapKeySelector) (string, error) { func getSecretKey(selector *corev1.SecretKeySelector) ([]byte, error) { result, err := common.GetSecretFromVolume(selector) if err != nil { - return nil, errors.Wrap(err, "secret value not injected") + return nil, fmt.Errorf("secret value not injected, %w", err) } return []byte(result), nil } diff --git a/eventsources/sources/hdfs/start.go b/eventsources/sources/hdfs/start.go index a271336026..0774f7c766 100644 --- a/eventsources/sources/hdfs/start.go +++ b/eventsources/sources/hdfs/start.go @@ -3,6 +3,7 @@ package hdfs import ( "context" "encoding/json" + "fmt" "os" "path/filepath" "regexp" @@ -10,10 +11,10 @@ import ( "time" "github.com/colinmarc/hdfs" - "github.com/pkg/errors" "go.uber.org/zap" "github.com/argoproj/argo-events/common/logging" + eventsourcecommon "github.com/argoproj/argo-events/eventsources/common" "github.com/argoproj/argo-events/eventsources/common/fsevent" "github.com/argoproj/argo-events/eventsources/common/naivewatcher" "github.com/argoproj/argo-events/eventsources/sources" @@ -64,7 +65,7 @@ func (w *WatchableHDFS) GetFileID(fi os.FileInfo) interface{} { } // StartListening starts listening events -func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte) error) error { +func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error) error { log := logging.FromContext(ctx). 
With(logging.LabelEventSourceType, el.GetEventSourceType(), logging.LabelEventName, el.GetEventName()) log.Info("started processing the Emitter event source...") @@ -75,20 +76,20 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt log.Info("setting up HDFS configuration...") hdfsConfig, err := createHDFSConfig(hdfsEventSource) if err != nil { - return errors.Wrapf(err, "failed to create HDFS configuration for %s", el.GetEventName()) + return fmt.Errorf("failed to create HDFS configuration for %s, %w", el.GetEventName(), err) } log.Info("setting up HDFS client...") hdfscli, err := createHDFSClient(hdfsConfig.Addresses, hdfsConfig.HDFSUser, hdfsConfig.KrbOptions) if err != nil { - return errors.Wrapf(err, "failed to create the HDFS client for %s", el.GetEventName()) + return fmt.Errorf("failed to create the HDFS client for %s, %w", el.GetEventName(), err) } defer hdfscli.Close() log.Info("setting up a new watcher...") watcher, err := naivewatcher.NewWatcher(&WatchableHDFS{hdfscli: hdfscli}) if err != nil { - return errors.Wrapf(err, "failed to create the HDFS watcher for %s", el.GetEventName()) + return fmt.Errorf("failed to create the HDFS watcher for %s, %w", el.GetEventName(), err) } defer watcher.Close() @@ -96,7 +97,7 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt if hdfsEventSource.CheckInterval != "" { d, err := time.ParseDuration(hdfsEventSource.CheckInterval) if err != nil { - return errors.Wrapf(err, "failed to parse the check in interval for %s", el.GetEventName()) + return fmt.Errorf("failed to parse the check in interval for %s, %w", el.GetEventName(), err) } intervalDuration = d } @@ -104,14 +105,14 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt log.Info("started HDFS watcher") err = watcher.Start(intervalDuration) if err != nil { - return errors.Wrapf(err, "failed to start the watcher for %s", el.GetEventName()) + return fmt.Errorf("failed to start the watcher for %s, %w", el.GetEventName(), err) } // directory to watch must be available in HDFS. You can't watch a directory that is not present. 
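 	// A fail-fast guard one could add here (a sketch only, not part of this
 	// change, using the colinmarc/hdfs client already in scope as hdfscli):
 	//
 	//	if _, err := hdfscli.Stat(hdfsEventSource.Directory); err != nil {
 	//		return fmt.Errorf("watch directory %s is not accessible, %w", hdfsEventSource.Directory, err)
 	//	}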
log.Info("adding configured directory to watcher...") err = watcher.Add(hdfsEventSource.Directory) if err != nil { - return errors.Wrapf(err, "failed to add directory %s for %s", hdfsEventSource.Directory, el.GetEventName()) + return fmt.Errorf("failed to add directory %s for %s, %w", hdfsEventSource.Directory, el.GetEventName(), err) } op := fsevent.NewOp(hdfsEventSource.Type) @@ -119,7 +120,7 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt if hdfsEventSource.PathRegexp != "" { pathRegexp, err = regexp.Compile(hdfsEventSource.PathRegexp) if err != nil { - return errors.Wrapf(err, "failed to compile the path regex %s for %s", hdfsEventSource.PathRegexp, el.GetEventName()) + return fmt.Errorf("failed to compile the path regex %s for %s, %w", hdfsEventSource.PathRegexp, el.GetEventName(), err) } } @@ -130,7 +131,7 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt if !ok { log.Info("HDFS watcher has stopped") // watcher stopped watching file events - return errors.Errorf("watcher has been stopped for %s", el.GetEventName()) + return fmt.Errorf("watcher has been stopped for %s", el.GetEventName()) } event.Metadata = hdfsEventSource.Metadata matched := false @@ -149,14 +150,14 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt } } case err := <-watcher.Errors: - return errors.Wrapf(err, "failed to watch events for %s", el.GetEventName()) + return fmt.Errorf("failed to watch events for %s, %w", el.GetEventName(), err) case <-ctx.Done(): return nil } } } -func (el *EventListener) handleOne(event fsevent.Event, dispatch func([]byte) error, log *zap.SugaredLogger) error { +func (el *EventListener) handleOne(event fsevent.Event, dispatch func([]byte, ...eventsourcecommon.Option) error, log *zap.SugaredLogger) error { defer func(start time.Time) { el.Metrics.EventProcessingDuration(el.GetEventSourceName(), el.GetEventName(), float64(time.Since(start)/time.Millisecond)) }(time.Now()) @@ -169,12 +170,12 @@ func (el *EventListener) handleOne(event fsevent.Event, dispatch func([]byte) er payload, err := json.Marshal(event) if err != nil { - return errors.Wrap(err, "failed to marshal the event data, rejecting event...") + return fmt.Errorf("failed to marshal the event data, rejecting event, %w", err) } logger.Info("dispatching event on data channel...") if err = dispatch(payload); err != nil { - return errors.Wrap(err, "failed to dispatch an HDFS event") + return fmt.Errorf("failed to dispatch an HDFS event, %w", err) } return nil } diff --git a/eventsources/sources/hdfs/validate.go b/eventsources/sources/hdfs/validate.go index 99f4ddb4f1..659f8f7f79 100644 --- a/eventsources/sources/hdfs/validate.go +++ b/eventsources/sources/hdfs/validate.go @@ -18,7 +18,7 @@ package hdfs import ( "context" - "errors" + "fmt" "time" "github.com/argoproj/argo-events/common" @@ -36,16 +36,16 @@ func validate(eventSource *v1alpha1.HDFSEventSource) error { return common.ErrNilEventSource } if eventSource.Type == "" { - return errors.New("type is required") + return fmt.Errorf("type is required") } op := fsevent.NewOp(eventSource.Type) if op == 0 { - return errors.New("type is invalid") + return fmt.Errorf("type is invalid") } if eventSource.CheckInterval != "" { _, err := time.ParseDuration(eventSource.CheckInterval) if err != nil { - return errors.New("failed to parse interval") + return fmt.Errorf("failed to parse interval") } } err := eventSource.WatchPathConfig.Validate() @@ -53,20 +53,20 @@ func validate(eventSource 
*v1alpha1.HDFSEventSource) error { return err } if len(eventSource.Addresses) == 0 { - return errors.New("addresses is required") + return fmt.Errorf("addresses is required") } hasKrbCCache := eventSource.KrbCCacheSecret != nil hasKrbKeytab := eventSource.KrbKeytabSecret != nil if eventSource.HDFSUser == "" && !hasKrbCCache && !hasKrbKeytab { - return errors.New("either hdfsUser, krbCCacheSecret or krbKeytabSecret is required") + return fmt.Errorf("either hdfsUser, krbCCacheSecret or krbKeytabSecret is required") } if hasKrbKeytab && (eventSource.KrbServicePrincipalName == "" || eventSource.KrbConfigConfigMap == nil || eventSource.KrbUsername == "" || eventSource.KrbRealm == "") { - return errors.New("krbServicePrincipalName, krbConfigConfigMap, krbUsername and krbRealm are required with krbKeytabSecret") + return fmt.Errorf("krbServicePrincipalName, krbConfigConfigMap, krbUsername and krbRealm are required with krbKeytabSecret") } if hasKrbCCache && (eventSource.KrbServicePrincipalName == "" || eventSource.KrbConfigConfigMap == nil) { - return errors.New("krbServicePrincipalName and krbConfigConfigMap are required with krbCCacheSecret") + return fmt.Errorf("krbServicePrincipalName and krbConfigConfigMap are required with krbCCacheSecret") } return err } diff --git a/eventsources/sources/hdfs/validate_test.go b/eventsources/sources/hdfs/validate_test.go index 75e5f0395b..9092992f1d 100644 --- a/eventsources/sources/hdfs/validate_test.go +++ b/eventsources/sources/hdfs/validate_test.go @@ -3,7 +3,7 @@ package hdfs import ( "context" "fmt" - "io/ioutil" + "os" "testing" "github.com/argoproj/argo-events/eventsources/sources" @@ -19,7 +19,7 @@ func TestValidateEventSource(t *testing.T) { assert.Error(t, err) assert.Equal(t, "type is required", err.Error()) - content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "hdfs.yaml")) + content, err := os.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "hdfs.yaml")) assert.Nil(t, err) var eventSource *v1alpha1.EventSource diff --git a/eventsources/sources/kafka/start.go b/eventsources/sources/kafka/start.go index 647dbd5f9c..4e73de0173 100644 --- a/eventsources/sources/kafka/start.go +++ b/eventsources/sources/kafka/start.go @@ -19,17 +19,20 @@ package kafka import ( "context" "encoding/json" + "errors" + "fmt" "strconv" "strings" "sync" "time" - "github.com/Shopify/sarama" - "github.com/pkg/errors" + "github.com/IBM/sarama" "go.uber.org/zap" "github.com/argoproj/argo-events/common" "github.com/argoproj/argo-events/common/logging" + eventbuscommon "github.com/argoproj/argo-events/eventbus/common" + eventsourcecommon "github.com/argoproj/argo-events/eventsources/common" "github.com/argoproj/argo-events/eventsources/sources" metrics "github.com/argoproj/argo-events/metrics" apicommon "github.com/argoproj/argo-events/pkg/apis/common" @@ -70,7 +73,7 @@ func verifyPartitionAvailable(part int32, partitions []int32) bool { } // StartListening starts listening events -func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte) error) error { +func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error) error { log := logging.FromContext(ctx). 
With(logging.LabelEventSourceType, el.GetEventSourceType(), logging.LabelEventName, el.GetEventName()) defer sources.Recover(el.GetEventName()) @@ -85,7 +88,7 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt } } -func (el *EventListener) consumerGroupConsumer(ctx context.Context, log *zap.SugaredLogger, kafkaEventSource *v1alpha1.KafkaEventSource, dispatch func([]byte) error) error { +func (el *EventListener) consumerGroupConsumer(ctx context.Context, log *zap.SugaredLogger, kafkaEventSource *v1alpha1.KafkaEventSource, dispatch func([]byte, ...eventsourcecommon.Option) error) error { config, err := getSaramaConfig(kafkaEventSource, log) if err != nil { return err @@ -93,14 +96,14 @@ func (el *EventListener) consumerGroupConsumer(ctx context.Context, log *zap.Sug switch kafkaEventSource.ConsumerGroup.RebalanceStrategy { case "sticky": - config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategySticky + config.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.NewBalanceStrategySticky()} case "roundrobin": - config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRoundRobin + config.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.NewBalanceStrategyRoundRobin()} case "range": - config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRange + config.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.NewBalanceStrategyRange()} default: log.Info("Invalid rebalance strategy, using default: range") - config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRange + config.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{sarama.NewBalanceStrategyRange()} } consumer := Consumer{ @@ -155,7 +158,7 @@ func (el *EventListener) consumerGroupConsumer(ctx context.Context, log *zap.Sug return nil } -func (el *EventListener) partitionConsumer(ctx context.Context, log *zap.SugaredLogger, kafkaEventSource *v1alpha1.KafkaEventSource, dispatch func([]byte) error) error { +func (el *EventListener) partitionConsumer(ctx context.Context, log *zap.SugaredLogger, kafkaEventSource *v1alpha1.KafkaEventSource, dispatch func([]byte, ...eventsourcecommon.Option) error) error { defer sources.Recover(el.GetEventName()) log.Info("start kafka event source...") @@ -163,7 +166,7 @@ func (el *EventListener) partitionConsumer(ctx context.Context, log *zap.Sugared var consumer sarama.Consumer log.Info("connecting to Kafka cluster...") - if err := common.Connect(kafkaEventSource.ConnectionBackoff, func() error { + if err := common.DoWithRetry(kafkaEventSource.ConnectionBackoff, func() error { var err error config, err := getSaramaConfig(kafkaEventSource, log) @@ -178,7 +181,7 @@ func (el *EventListener) partitionConsumer(ctx context.Context, log *zap.Sugared } return nil }); err != nil { - return errors.Wrapf(err, "failed to connect to Kafka broker for event source %s", el.GetEventName()) + return fmt.Errorf("failed to connect to Kafka broker for event source %s, %w", el.GetEventName(), err) } log = log.With("partition-id", kafkaEventSource.Partition) @@ -186,25 +189,25 @@ func (el *EventListener) partitionConsumer(ctx context.Context, log *zap.Sugared log.Info("parsing the partition value...") pInt, err := strconv.ParseInt(kafkaEventSource.Partition, 10, 32) if err != nil { - return errors.Wrapf(err, "failed to parse Kafka partition %s for event source %s", kafkaEventSource.Partition, el.GetEventName()) + return fmt.Errorf("failed to parse Kafka partition %s for event 
source %s, %w", kafkaEventSource.Partition, el.GetEventName(), err) } partition := int32(pInt) log.Info("getting available partitions...") availablePartitions, err := consumer.Partitions(kafkaEventSource.Topic) if err != nil { - return errors.Wrapf(err, "failed to get the available partitions for topic %s and event source %s", kafkaEventSource.Topic, el.GetEventName()) + return fmt.Errorf("failed to get the available partitions for topic %s and event source %s, %w", kafkaEventSource.Topic, el.GetEventName(), err) } log.Info("verifying the partition exists within available partitions...") if ok := verifyPartitionAvailable(partition, availablePartitions); !ok { - return errors.Wrapf(err, "partition %d is not available. event source %s", partition, el.GetEventName()) + return fmt.Errorf("partition %d is not available. event source %s, %w", partition, el.GetEventName(), err) } log.Info("getting partition consumer...") partitionConsumer, err := consumer.ConsumePartition(kafkaEventSource.Topic, partition, sarama.OffsetNewest) if err != nil { - return errors.Wrapf(err, "failed to create consumer partition for event source %s", el.GetEventName()) + return fmt.Errorf("failed to create consumer partition for event source %s, %w", el.GetEventName(), err) } processOne := func(msg *sarama.ConsumerMessage) error { @@ -215,10 +218,20 @@ func (el *EventListener) partitionConsumer(ctx context.Context, log *zap.Sugared log.Info("dispatching event on the data channel...") eventData := &events.KafkaEventData{ Topic: msg.Topic, + Key: string(msg.Key), Partition: int(msg.Partition), Timestamp: msg.Timestamp.String(), Metadata: kafkaEventSource.Metadata, } + + headers := make(map[string]string) + + for _, recordHeader := range msg.Headers { + headers[string(recordHeader.Key)] = string(recordHeader.Value) + } + + eventData.Headers = headers + if kafkaEventSource.JSONBody { eventData.Body = (*json.RawMessage)(&msg.Value) } else { @@ -226,10 +239,13 @@ func (el *EventListener) partitionConsumer(ctx context.Context, log *zap.Sugared } eventBody, err := json.Marshal(eventData) if err != nil { - return errors.Wrap(err, "failed to marshal the event data, rejecting the event...") + return fmt.Errorf("failed to marshal the event data, rejecting the event, %w", err) } - if err = dispatch(eventBody); err != nil { - return errors.Wrap(err, "failed to dispatch a Kafka event...") + + kafkaID := genUniqueID(el.GetEventSourceName(), el.GetEventName(), kafkaEventSource.URL, msg.Topic, msg.Partition, msg.Offset) + + if err = dispatch(eventBody, eventsourcecommon.WithID(kafkaID)); err != nil { + return fmt.Errorf("failed to dispatch a Kafka event, %w", err) } return nil } @@ -243,7 +259,7 @@ func (el *EventListener) partitionConsumer(ctx context.Context, log *zap.Sugared el.Metrics.EventProcessingFailed(el.GetEventSourceName(), el.GetEventName()) } case err := <-partitionConsumer.Errors(): - return errors.Wrapf(err, "failed to consume messages for event source %s", el.GetEventName()) + return fmt.Errorf("failed to consume messages for event source %s, %w", el.GetEventName(), err) case <-ctx.Done(): log.Info("event source is stopped, closing partition consumer") @@ -256,8 +272,11 @@ func (el *EventListener) partitionConsumer(ctx context.Context, log *zap.Sugared } } -func getSaramaConfig(kafkaEventSource *v1alpha1.KafkaEventSource, log *zap.SugaredLogger) (*sarama.Config, error) { //nolint:interfacer - config := sarama.NewConfig() +func getSaramaConfig(kafkaEventSource *v1alpha1.KafkaEventSource, log *zap.SugaredLogger) 
(*sarama.Config, error) { + config, err := common.GetSaramaConfigFromYAMLString(kafkaEventSource.Config) + if err != nil { + return nil, err + } if kafkaEventSource.Version == "" { config.Version = sarama.V1_0_0_0 @@ -274,6 +293,11 @@ func getSaramaConfig(kafkaEventSource *v1alpha1.KafkaEventSource, log *zap.Sugar config.Net.SASL.Enable = true config.Net.SASL.Mechanism = sarama.SASLMechanism(kafkaEventSource.SASL.GetMechanism()) + if config.Net.SASL.Mechanism == "SCRAM-SHA-512" { + config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &common.XDGSCRAMClient{HashGeneratorFcn: common.SHA512New} } + } else if config.Net.SASL.Mechanism == "SCRAM-SHA-256" { + config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &common.XDGSCRAMClient{HashGeneratorFcn: common.SHA256New} } + } user, err := common.GetSecretFromVolume(kafkaEventSource.SASL.UserSecret) if err != nil { @@ -293,7 +317,7 @@ func getSaramaConfig(kafkaEventSource *v1alpha1.KafkaEventSource, log *zap.Sugar if kafkaEventSource.TLS != nil { tlsConfig, err := common.GetTLSConfig(kafkaEventSource.TLS) if err != nil { - return nil, errors.Wrap(err, "failed to get the tls configuration") + return nil, fmt.Errorf("failed to get the tls configuration, %w", err) } config.Net.TLS.Config = tlsConfig config.Net.TLS.Enable = true @@ -310,7 +334,7 @@ func getSaramaConfig(kafkaEventSource *v1alpha1.KafkaEventSource, log *zap.Sugar // Consumer represents a Sarama consumer group consumer type Consumer struct { ready chan bool - dispatch func([]byte) error + dispatch func([]byte, ...eventsourcecommon.Option) error logger *zap.SugaredLogger kafkaEventSource *v1alpha1.KafkaEventSource eventSourceName string @@ -330,22 +354,29 @@ func (consumer *Consumer) Cleanup(sarama.ConsumerGroupSession) error { return nil } +var eventBusErr *eventbuscommon.EventBusError + // ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages(). func (consumer *Consumer) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error { // NOTE: // Do not move the code below to a goroutine. // The `ConsumeClaim` itself is called within a goroutine, see: - // https://github.com/Shopify/sarama/blob/master/consumer_group.go#L27-L29 + // https://github.com/IBM/sarama/blob/master/consumer_group.go#L27-L29 for message := range claim.Messages() { if err := consumer.processOne(session, message); err != nil { - consumer.logger.Errorw("failed to process a Kafka message", zap.Error(err)) consumer.metrics.EventProcessingFailed(consumer.eventSourceName, consumer.eventName) - continue + if errors.As(err, &eventBusErr) { // EventBus error, do not continue. 
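+					// Breaking out (rather than continuing) preserves at-least-once delivery:
+					// processOne only calls session.MarkMessage on success, so offsets for
+					// undelivered events stay uncommitted and Kafka redelivers them once the
+					// event bus recovers.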
+ consumer.logger.Errorw("failed to process a Kafka message due to event bus issue", zap.Error(err)) + break + } else { + consumer.logger.Errorw("failed to process a Kafka message, skip it", zap.Error(err)) + continue + } } if consumer.kafkaEventSource.LimitEventsPerSecond > 0 { - //1000000000 is 1 second in nanoseconds + // 1000000000 is 1 second in nanoseconds d := (1000000000 / time.Duration(consumer.kafkaEventSource.LimitEventsPerSecond) * time.Nanosecond) * time.Nanosecond - consumer.logger.Infof("Sleeping for: %v.", d) + consumer.logger.Debugf("Sleeping for: %v.", d) time.Sleep(d) } } @@ -361,10 +392,20 @@ func (consumer *Consumer) processOne(session sarama.ConsumerGroupSession, messag consumer.logger.Info("dispatching event on the data channel...") eventData := &events.KafkaEventData{ Topic: message.Topic, + Key: string(message.Key), Partition: int(message.Partition), Timestamp: message.Timestamp.String(), Metadata: consumer.kafkaEventSource.Metadata, } + + headers := make(map[string]string) + + for _, recordHeader := range message.Headers { + headers[string(recordHeader.Key)] = string(recordHeader.Value) + } + + eventData.Headers = headers + if consumer.kafkaEventSource.JSONBody { eventData.Body = (*json.RawMessage)(&message.Value) } else { @@ -372,12 +413,22 @@ func (consumer *Consumer) processOne(session sarama.ConsumerGroupSession, messag } eventBody, err := json.Marshal(eventData) if err != nil { - return errors.Wrap(err, "failed to marshal the event data, rejecting the event...") + return fmt.Errorf("failed to marshal the event data, rejecting the event, %w", err) } - if err = consumer.dispatch(eventBody); err != nil { - return errors.Wrap(err, "failed to dispatch a kafka event...") + messageID := genUniqueID(consumer.eventSourceName, consumer.eventName, consumer.kafkaEventSource.URL, message.Topic, message.Partition, message.Offset) + + if err = consumer.dispatch(eventBody, eventsourcecommon.WithID(messageID)); err != nil { + return fmt.Errorf("failed to dispatch a kafka event, %w", err) } session.MarkMessage(message, "") return nil } + +// Function can be passed as Option to generate unique id for kafka event +// eventSourceName:eventName:kafka-url:topic:partition:offset +func genUniqueID(eventSourceName, eventName, kafkaURL, topic string, partition int32, offset int64) string { + kafkaID := fmt.Sprintf("%s:%s:%s:%s:%d:%d", eventSourceName, eventName, strings.Split(kafkaURL, ",")[0], topic, partition, offset) + + return kafkaID +} diff --git a/eventsources/sources/kafka/validate_test.go b/eventsources/sources/kafka/validate_test.go index 7d47305a73..25a023c297 100644 --- a/eventsources/sources/kafka/validate_test.go +++ b/eventsources/sources/kafka/validate_test.go @@ -19,7 +19,7 @@ package kafka import ( "context" "fmt" - "io/ioutil" + "os" "testing" "github.com/ghodss/yaml" @@ -36,7 +36,7 @@ func TestValidateEventSource(t *testing.T) { assert.Error(t, err) assert.Equal(t, "url must be specified", err.Error()) - content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "kafka.yaml")) + content, err := os.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "kafka.yaml")) assert.Nil(t, err) var eventSource *v1alpha1.EventSource diff --git a/eventsources/sources/minio/start.go b/eventsources/sources/minio/start.go index 4b55acfde2..0ae150a16d 100644 --- a/eventsources/sources/minio/start.go +++ b/eventsources/sources/minio/start.go @@ -18,15 +18,21 @@ package minio import ( "context" + "crypto/tls" + "crypto/x509" "encoding/json" + "fmt" + "net/http" 
"time" - "github.com/minio/minio-go" - "github.com/pkg/errors" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" + "github.com/minio/minio-go/v7/pkg/notification" "go.uber.org/zap" "github.com/argoproj/argo-events/common" "github.com/argoproj/argo-events/common/logging" + eventsourcecommon "github.com/argoproj/argo-events/eventsources/common" "github.com/argoproj/argo-events/eventsources/sources" metrics "github.com/argoproj/argo-events/metrics" apicommon "github.com/argoproj/argo-events/pkg/apis/common" @@ -57,7 +63,7 @@ func (el *EventListener) GetEventSourceType() apicommon.EventSourceType { } // StartListening starts listening events -func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte) error) error { +func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error) error { log := logging.FromContext(ctx). With(logging.LabelEventSourceType, el.GetEventSourceType(), logging.LabelEventName, el.GetEventName(), zap.String("bucketName", el.MinioEventSource.Bucket.Name)) @@ -70,25 +76,49 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt log.Info("retrieving access and secret key...") accessKey, err := common.GetSecretFromVolume(minioEventSource.AccessKey) if err != nil { - return errors.Wrapf(err, "failed to get the access key for event source %s", el.GetEventName()) + return fmt.Errorf("failed to get the access key for event source %s, %w", el.GetEventName(), err) } secretKey, err := common.GetSecretFromVolume(minioEventSource.SecretKey) if err != nil { - return errors.Wrapf(err, "failed to retrieve the secret key for event source %s", el.GetEventName()) + return fmt.Errorf("failed to retrieve the secret key for event source %s, %w", el.GetEventName(), err) } - log.Info("setting up a minio client...") - minioClient, err := minio.New(minioEventSource.Endpoint, accessKey, secretKey, !minioEventSource.Insecure) - if err != nil { - return errors.Wrapf(err, "failed to create a client for event source %s", el.GetEventName()) + var minioClient *minio.Client + var clientErr error + if minioEventSource.CACertificate != nil { + log.Info("retrieving CA certificate...") + caCertificate, err := common.GetSecretFromVolume(minioEventSource.CACertificate) + if err != nil { + return fmt.Errorf("failed to get the CA certificate for event source %s, %w", el.GetEventName(), err) + } + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM([]byte(caCertificate)) + tlsConfig := &tls.Config{ + RootCAs: caCertPool, + } + log.Info("setting up a minio client with custom CA...") + minioClient, clientErr = minio.New(minioEventSource.Endpoint, &minio.Options{ + Creds: credentials.NewStaticV4(accessKey, secretKey, ""), + Secure: !minioEventSource.Insecure, + Transport: &http.Transport{ + TLSClientConfig: tlsConfig, + }, + }) + } else { + log.Info("setting up a minio client...") + minioClient, clientErr = minio.New(minioEventSource.Endpoint, &minio.Options{ + Creds: credentials.NewStaticV4(accessKey, secretKey, ""), + Secure: !minioEventSource.Insecure, + }) + } + if clientErr != nil { + return fmt.Errorf("failed to create a client for event source %s, %w", el.GetEventName(), err) } prefix, suffix := getFilters(minioEventSource) - doneCh := make(chan struct{}) - log.Info("started listening to bucket notifications...") - for notification := range minioClient.ListenBucketNotification(minioEventSource.Bucket.Name, prefix, suffix, minioEventSource.Events, doneCh) { + 
for notification := range minioClient.ListenBucketNotification(ctx, minioEventSource.Bucket.Name, prefix, suffix, minioEventSource.Events) { if notification.Err != nil { log.Errorw("invalid notification", zap.Error(notification.Err)) continue @@ -100,14 +130,11 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt } } - <-ctx.Done() - doneCh <- struct{}{} - log.Info("event source is stopped") return nil } -func (el *EventListener) handleOne(notification minio.NotificationInfo, dispatch func([]byte) error, log *zap.SugaredLogger) error { +func (el *EventListener) handleOne(notification notification.Info, dispatch func([]byte, ...eventsourcecommon.Option) error, log *zap.SugaredLogger) error { defer func(start time.Time) { el.Metrics.EventProcessingDuration(el.GetEventSourceName(), el.GetEventName(), float64(time.Since(start)/time.Millisecond)) }(time.Now()) @@ -115,12 +142,12 @@ func (el *EventListener) handleOne(notification minio.NotificationInfo, dispatch eventData := &events.MinioEventData{Notification: notification.Records, Metadata: el.MinioEventSource.Metadata} eventBytes, err := json.Marshal(eventData) if err != nil { - return errors.Wrap(err, "failed to marshal the event data, rejecting the event...") + return fmt.Errorf("failed to marshal the event data, rejecting the event, %w", err) } log.Info("dispatching the event on data channel...") if err = dispatch(eventBytes); err != nil { - return errors.Wrap(err, "failed to dispatch minio event") + return fmt.Errorf("failed to dispatch minio event, %w", err) } return nil } diff --git a/eventsources/sources/minio/validate.go b/eventsources/sources/minio/validate.go index f3db82a906..a5c5794969 100644 --- a/eventsources/sources/minio/validate.go +++ b/eventsources/sources/minio/validate.go @@ -22,7 +22,7 @@ import ( "github.com/argoproj/argo-events/common" apicommon "github.com/argoproj/argo-events/pkg/apis/common" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7/pkg/notification" ) // ValidateEventSource validates the minio event source @@ -48,7 +48,7 @@ func validate(eventSource *apicommon.S3Artifact) error { } if eventSource.Events != nil { for _, event := range eventSource.Events { - if minio.NotificationEventType(event) == "" { + if notification.EventType(event) == "" { return fmt.Errorf("unknown event %s", event) } } diff --git a/eventsources/sources/minio/validate_test.go b/eventsources/sources/minio/validate_test.go index b9415f7bbd..a0b4834aba 100644 --- a/eventsources/sources/minio/validate_test.go +++ b/eventsources/sources/minio/validate_test.go @@ -19,7 +19,7 @@ package minio import ( "context" "fmt" - "io/ioutil" + "os" "testing" "github.com/argoproj/argo-events/eventsources/sources" @@ -35,7 +35,7 @@ func TestValidateEventSource(t *testing.T) { assert.Error(t, err) assert.Equal(t, "access key can't be empty", err.Error()) - content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "minio.yaml")) + content, err := os.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "minio.yaml")) assert.Nil(t, err) var eventSource *v1alpha1.EventSource diff --git a/eventsources/sources/mqtt/start.go b/eventsources/sources/mqtt/start.go index a218b4a52c..d6e2e3681d 100644 --- a/eventsources/sources/mqtt/start.go +++ b/eventsources/sources/mqtt/start.go @@ -19,14 +19,15 @@ package mqtt import ( "context" "encoding/json" + "fmt" "time" mqttlib "github.com/eclipse/paho.mqtt.golang" - "github.com/pkg/errors" "go.uber.org/zap" "github.com/argoproj/argo-events/common" 
"github.com/argoproj/argo-events/common/logging" + eventsourcecommon "github.com/argoproj/argo-events/eventsources/common" "github.com/argoproj/argo-events/eventsources/sources" metrics "github.com/argoproj/argo-events/metrics" apicommon "github.com/argoproj/argo-events/pkg/apis/common" @@ -58,7 +59,7 @@ func (el *EventListener) GetEventSourceType() apicommon.EventSourceType { } // StartListening starts listening events -func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte) error) error { +func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error) error { log := logging.FromContext(ctx). With(logging.LabelEventSourceType, el.GetEventSourceType(), logging.LabelEventName, el.GetEventName()) defer sources.Recover(el.GetEventName()) @@ -107,27 +108,40 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt if mqttEventSource.TLS != nil { tlsConfig, err := common.GetTLSConfig(mqttEventSource.TLS) if err != nil { - return errors.Wrap(err, "failed to get the tls configuration") + return fmt.Errorf("failed to get the tls configuration, %w", err) } opts.TLSConfig = tlsConfig } + if mqttEventSource.Auth != nil { + username, err := common.GetSecretFromVolume(mqttEventSource.Auth.Username) + if err != nil { + return fmt.Errorf("username not found, %w", err) + } + password, err := common.GetSecretFromVolume(mqttEventSource.Auth.Password) + if err != nil { + return fmt.Errorf("password not found, %w", err) + } + opts.SetUsername(username) + opts.SetPassword(password) + } + var client mqttlib.Client log.Info("connecting to mqtt broker...") - if err := common.Connect(mqttEventSource.ConnectionBackoff, func() error { + if err := common.DoWithRetry(mqttEventSource.ConnectionBackoff, func() error { client = mqttlib.NewClient(opts) if token := client.Connect(); token.Wait() && token.Error() != nil { return token.Error() } return nil }); err != nil { - return errors.Wrapf(err, "failed to connect to the mqtt broker for event source %s", el.GetEventName()) + return fmt.Errorf("failed to connect to the mqtt broker for event source %s, %w", el.GetEventName(), err) } log.Info("subscribing to the topic...") if token := client.Subscribe(mqttEventSource.Topic, 0, handler); token.Wait() && token.Error() != nil { - return errors.Wrapf(token.Error(), "failed to subscribe to the topic %s for event source %s", mqttEventSource.Topic, el.GetEventName()) + return fmt.Errorf("failed to subscribe to the topic %s for event source %s, %w", mqttEventSource.Topic, el.GetEventName(), token.Error()) } <-ctx.Done() diff --git a/eventsources/sources/mqtt/validate.go b/eventsources/sources/mqtt/validate.go index 3aec44c5b5..9e7670dd38 100644 --- a/eventsources/sources/mqtt/validate.go +++ b/eventsources/sources/mqtt/validate.go @@ -46,5 +46,8 @@ func validate(eventSource *v1alpha1.MQTTEventSource) error { if eventSource.TLS != nil { return apicommon.ValidateTLSConfig(eventSource.TLS) } + if eventSource.Auth != nil { + return apicommon.ValidateBasicAuth(eventSource.Auth) + } return nil } diff --git a/eventsources/sources/mqtt/validate_test.go b/eventsources/sources/mqtt/validate_test.go index 1dcd65883c..d69e5da683 100644 --- a/eventsources/sources/mqtt/validate_test.go +++ b/eventsources/sources/mqtt/validate_test.go @@ -19,7 +19,7 @@ package mqtt import ( "context" "fmt" - "io/ioutil" + "os" "testing" "github.com/argoproj/argo-events/eventsources/sources" @@ -35,7 +35,7 @@ func TestValidateEventSource(t *testing.T) { 
assert.Error(t, err) assert.Equal(t, "url must be specified", err.Error()) - content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "mqtt.yaml")) + content, err := os.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "mqtt.yaml")) assert.Nil(t, err) var eventSource *v1alpha1.EventSource diff --git a/eventsources/sources/nats/start.go b/eventsources/sources/nats/start.go index 568d0ee174..f73c326dd6 100644 --- a/eventsources/sources/nats/start.go +++ b/eventsources/sources/nats/start.go @@ -19,14 +19,15 @@ package nats import ( "context" "encoding/json" + "fmt" "time" - natslib "github.com/nats-io/go-nats" - "github.com/pkg/errors" + natslib "github.com/nats-io/nats.go" "go.uber.org/zap" "github.com/argoproj/argo-events/common" "github.com/argoproj/argo-events/common/logging" + eventsourcecommon "github.com/argoproj/argo-events/eventsources/common" "github.com/argoproj/argo-events/eventsources/sources" metrics "github.com/argoproj/argo-events/metrics" apicommon "github.com/argoproj/argo-events/pkg/apis/common" @@ -58,7 +59,7 @@ func (el *EventListener) GetEventSourceType() apicommon.EventSourceType { } // StartListening starts listening events -func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte) error) error { +func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error) error { log := logging.FromContext(ctx). With(logging.LabelEventSourceType, el.GetEventSourceType(), logging.LabelEventName, el.GetEventName()) defer sources.Recover(el.GetEventName()) @@ -69,7 +70,7 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt if natsEventSource.TLS != nil { tlsConfig, err := common.GetTLSConfig(natsEventSource.TLS) if err != nil { - return errors.Wrap(err, "failed to get the tls configuration") + return fmt.Errorf("failed to get the tls configuration, %w", err) } opt = append(opt, natslib.Secure(tlsConfig)) } @@ -99,7 +100,7 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt } o, err := natslib.NkeyOptionFromSeed(nkeyFile) if err != nil { - return errors.Wrap(err, "failed to get NKey") + return fmt.Errorf("failed to get NKey, %w", err) } opt = append(opt, o) case natsEventSource.Auth.Credential != nil: @@ -113,14 +114,14 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt var conn *natslib.Conn log.Info("connecting to nats cluster...") - if err := common.Connect(natsEventSource.ConnectionBackoff, func() error { + if err := common.DoWithRetry(natsEventSource.ConnectionBackoff, func() error { var err error if conn, err = natslib.Connect(natsEventSource.URL, opt...); err != nil { return err } return nil }); err != nil { - return errors.Wrapf(err, "failed to connect to the nats server for event source %s", el.GetEventName()) + return fmt.Errorf("failed to connect to the nats server for event source %s, %w", el.GetEventName(), err) } defer conn.Close() @@ -128,14 +129,14 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt log.Info("assuming all events have a json body...") } - log.Info("subscribing to messages on the queue...") - _, err := conn.Subscribe(natsEventSource.Subject, func(msg *natslib.Msg) { + handler := func(msg *natslib.Msg) { defer func(start time.Time) { el.Metrics.EventProcessingDuration(el.GetEventSourceName(), el.GetEventName(), float64(time.Since(start)/time.Millisecond)) }(time.Now()) eventData := &events.NATSEventData{ Subject: msg.Subject, + 
Header: msg.Header, Metadata: natsEventSource.Metadata, } if natsEventSource.JSONBody { @@ -155,14 +156,24 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt log.Errorw("failed to dispatch a NATS event", zap.Error(err)) el.Metrics.EventProcessingFailed(el.GetEventSourceName(), el.GetEventName()) } - }) + } + + var err error + if natsEventSource.Queue != nil { + log.Infof("subscribing to messages on the subject %s queue %s", natsEventSource.Subject, *natsEventSource.Queue) + _, err = conn.QueueSubscribe(natsEventSource.Subject, *natsEventSource.Queue, handler) + } else { + log.Infof("subscribing to messages on the subject %s", natsEventSource.Subject) + _, err = conn.Subscribe(natsEventSource.Subject, handler) + } + if err != nil { - return errors.Wrapf(err, "failed to subscribe to the subject %s for event source %s", natsEventSource.Subject, el.GetEventName()) + return fmt.Errorf("failed to subscribe to the subject %s for event source %s, %w", natsEventSource.Subject, el.GetEventName(), err) } conn.Flush() if err := conn.LastError(); err != nil { - return errors.Wrapf(err, "connection failure for event source %s", el.GetEventName()) + return fmt.Errorf("connection failure for event source %s, %w", el.GetEventName(), err) } <-ctx.Done() diff --git a/eventsources/sources/nats/validate.go b/eventsources/sources/nats/validate.go index cbccf2a868..db9f365da7 100644 --- a/eventsources/sources/nats/validate.go +++ b/eventsources/sources/nats/validate.go @@ -18,8 +18,7 @@ package nats import ( "context" - - "github.com/pkg/errors" + "fmt" "github.com/argoproj/argo-events/common" apicommon "github.com/argoproj/argo-events/pkg/apis/common" @@ -36,13 +35,16 @@ func validate(eventSource *v1alpha1.NATSEventsSource) error { return common.ErrNilEventSource } if eventSource.URL == "" { - return errors.New("url must be specified") + return fmt.Errorf("url must be specified") } if eventSource.Subject == "" { - return errors.New("subject must be specified") + return fmt.Errorf("subject must be specified") } if eventSource.TLS != nil { return apicommon.ValidateTLSConfig(eventSource.TLS) } + if eventSource.Queue != nil && *eventSource.Queue == "" { + return fmt.Errorf("queue group cannot be empty if specified") + } return nil } diff --git a/eventsources/sources/nats/validate_test.go b/eventsources/sources/nats/validate_test.go index 48d4bd87b7..9a8e6e798d 100644 --- a/eventsources/sources/nats/validate_test.go +++ b/eventsources/sources/nats/validate_test.go @@ -19,7 +19,7 @@ package nats import ( "context" "fmt" - "io/ioutil" + "os" "testing" "github.com/argoproj/argo-events/eventsources/sources" @@ -35,7 +35,7 @@ func TestValidateEventSource(t *testing.T) { assert.Error(t, err) assert.Equal(t, "url must be specified", err.Error()) - content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "nats.yaml")) + content, err := os.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "nats.yaml")) assert.Nil(t, err) var eventSource *v1alpha1.EventSource diff --git a/eventsources/sources/nsq/start.go b/eventsources/sources/nsq/start.go index 91c60ddfb5..7eabc85d38 100644 --- a/eventsources/sources/nsq/start.go +++ b/eventsources/sources/nsq/start.go @@ -19,15 +19,16 @@ package nsq import ( "context" "encoding/json" + "fmt" "strconv" "time" "github.com/nsqio/go-nsq" - "github.com/pkg/errors" "go.uber.org/zap" "github.com/argoproj/argo-events/common" "github.com/argoproj/argo-events/common/logging" + eventsourcecommon 
"github.com/argoproj/argo-events/eventsources/common" "github.com/argoproj/argo-events/eventsources/sources" metrics "github.com/argoproj/argo-events/metrics" apicommon "github.com/argoproj/argo-events/pkg/apis/common" @@ -62,14 +63,14 @@ type messageHandler struct { eventSourceName string eventName string metrics *metrics.Metrics - dispatch func([]byte) error + dispatch func([]byte, ...eventsourcecommon.Option) error logger *zap.SugaredLogger isJSON bool metadata map[string]string } // StartListening listens NSQ events -func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte) error) error { +func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error) error { log := logging.FromContext(ctx). With(logging.LabelEventSourceType, el.GetEventSourceType(), logging.LabelEventName, el.GetEventName()) log.Info("started processing the NSQ event source...") @@ -85,20 +86,20 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt if nsqEventSource.TLS != nil { tlsConfig, err := common.GetTLSConfig(nsqEventSource.TLS) if err != nil { - return errors.Wrap(err, "failed to get the tls configuration") + return fmt.Errorf("failed to get the tls configuration, %w", err) } config.TlsConfig = tlsConfig config.TlsV1 = true } - if err := common.Connect(nsqEventSource.ConnectionBackoff, func() error { + if err := common.DoWithRetry(nsqEventSource.ConnectionBackoff, func() error { var err error if consumer, err = nsq.NewConsumer(nsqEventSource.Topic, nsqEventSource.Channel, config); err != nil { return err } return nil }); err != nil { - return errors.Wrapf(err, "failed to create a new consumer for topic %s and channel %s for event source %s", nsqEventSource.Topic, nsqEventSource.Channel, el.GetEventName()) + return fmt.Errorf("failed to create a new consumer for topic %s and channel %s for event source %s, %w", nsqEventSource.Topic, nsqEventSource.Channel, el.GetEventName(), err) } if nsqEventSource.JSONBody { @@ -108,7 +109,7 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt consumer.AddHandler(&messageHandler{eventSourceName: el.EventSourceName, eventName: el.EventName, dispatch: dispatch, logger: log, isJSON: nsqEventSource.JSONBody, metadata: nsqEventSource.Metadata, metrics: el.Metrics}) if err := consumer.ConnectToNSQLookupd(nsqEventSource.HostAddress); err != nil { - return errors.Wrapf(err, "lookup failed for host %s for event source %s", nsqEventSource.HostAddress, el.GetEventName()) + return fmt.Errorf("lookup failed for host %s for event source %s, %w", nsqEventSource.HostAddress, el.GetEventName(), err) } <-ctx.Done() diff --git a/eventsources/sources/nsq/validate.go b/eventsources/sources/nsq/validate.go index a9585d7c38..df46caf06d 100644 --- a/eventsources/sources/nsq/validate.go +++ b/eventsources/sources/nsq/validate.go @@ -18,8 +18,7 @@ package nsq import ( "context" - - "github.com/pkg/errors" + "fmt" "github.com/argoproj/argo-events/common" apicommon "github.com/argoproj/argo-events/pkg/apis/common" @@ -36,13 +35,13 @@ func validate(eventSource *v1alpha1.NSQEventSource) error { return common.ErrNilEventSource } if eventSource.HostAddress == "" { - return errors.New("host address must be specified") + return fmt.Errorf("host address must be specified") } if eventSource.Topic == "" { - return errors.New("topic must be specified") + return fmt.Errorf("topic must be specified") } if eventSource.Channel == "" { - return errors.New("channel must be 
specified") + return fmt.Errorf("channel must be specified") } if eventSource.TLS != nil { return apicommon.ValidateTLSConfig(eventSource.TLS) diff --git a/eventsources/sources/nsq/validate_test.go b/eventsources/sources/nsq/validate_test.go index 04afc2ffbd..767821b5f4 100644 --- a/eventsources/sources/nsq/validate_test.go +++ b/eventsources/sources/nsq/validate_test.go @@ -19,7 +19,7 @@ package nsq import ( "context" "fmt" - "io/ioutil" + "os" "testing" "github.com/ghodss/yaml" @@ -36,7 +36,7 @@ func TestValidateEventSource(t *testing.T) { assert.Error(t, err) assert.Equal(t, "host address must be specified", err.Error()) - content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "nsq.yaml")) + content, err := os.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "nsq.yaml")) assert.Nil(t, err) var eventSource *v1alpha1.EventSource diff --git a/eventsources/sources/pulsar/start.go b/eventsources/sources/pulsar/start.go index 3fc4ea7ea1..e1307a999d 100644 --- a/eventsources/sources/pulsar/start.go +++ b/eventsources/sources/pulsar/start.go @@ -18,14 +18,15 @@ package pulsar import ( "context" "encoding/json" + "fmt" "time" "github.com/apache/pulsar-client-go/pulsar" - "github.com/pkg/errors" "go.uber.org/zap" "github.com/argoproj/argo-events/common" "github.com/argoproj/argo-events/common/logging" + eventsourcecommon "github.com/argoproj/argo-events/eventsources/common" "github.com/argoproj/argo-events/eventsources/sources" metrics "github.com/argoproj/argo-events/metrics" apicommon "github.com/argoproj/argo-events/pkg/apis/common" @@ -57,7 +58,7 @@ func (el *EventListener) GetEventSourceType() apicommon.EventSourceType { } // StartListening listens Pulsar events -func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte) error) error { +func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error) error { log := logging.FromContext(ctx). 
With(logging.LabelEventSourceType, el.GetEventSourceType(), logging.LabelEventName, el.GetEventName()) log.Info("started processing the Pulsar event source...") @@ -86,7 +87,7 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt if pulsarEventSource.TLSTrustCertsSecret != nil { tlsTrustCertsFilePath, err = common.GetSecretVolumePath(pulsarEventSource.TLSTrustCertsSecret) if err != nil { - log.Errorw("failed to get TLSTrustCertsFilePath from the volume", "error", err) + log.Errorw("failed to get TLSTrustCertsFilePath from the volume", zap.Error(err)) return err } } @@ -97,6 +98,28 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt TLSValidateHostname: pulsarEventSource.TLSValidateHostname, } + if pulsarEventSource.AuthTokenSecret != nil { + token, err := common.GetSecretFromVolume(pulsarEventSource.AuthTokenSecret) + if err != nil { + log.Errorw("failed to get AuthTokenSecret from the volume", zap.Error(err)) + return err + } + clientOpt.Authentication = pulsar.NewAuthenticationToken(token) + } + + if len(pulsarEventSource.AuthAthenzParams) > 0 { + log.Info("setting athenz auth option...") + if pulsarEventSource.AuthAthenzSecret != nil { + authAthenzFilePath, err := common.GetSecretVolumePath(pulsarEventSource.AuthAthenzSecret) + if err != nil { + log.Errorw("failed to get authAthenzSecret from the volume", zap.Error(err)) + return err + } + pulsarEventSource.AuthAthenzParams["privateKey"] = "file://" + authAthenzFilePath + } + clientOpt.Authentication = pulsar.NewAuthenticationAthenz(pulsarEventSource.AuthAthenzParams) + } + if pulsarEventSource.TLS != nil { log.Info("setting tls auth option...") var clientCertPath, clientKeyPath string @@ -104,40 +127,36 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt case pulsarEventSource.TLS.ClientCertSecret != nil && pulsarEventSource.TLS.ClientKeySecret != nil: clientCertPath, err = common.GetSecretVolumePath(pulsarEventSource.TLS.ClientCertSecret) if err != nil { - log.Errorw("failed to get ClientCertPath from the volume", "error", err) + log.Errorw("failed to get ClientCertPath from the volume", zap.Error(err)) return err } clientKeyPath, err = common.GetSecretVolumePath(pulsarEventSource.TLS.ClientKeySecret) if err != nil { - log.Errorw("failed to get ClientKeyPath from the volume", "error", err) + log.Errorw("failed to get ClientKeyPath from the volume", zap.Error(err)) return err } - case pulsarEventSource.TLS.DeprecatedClientCertPath != "" && pulsarEventSource.TLS.DeprecatedClientKeyPath != "": - // DEPRECATED. 
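A note on the two authentication paths introduced above (token and Athenz): both reduce to setting pulsar.ClientOptions.Authentication, with the Athenz path rewriting the privateKey parameter to a file:// URI pointing at the mounted secret. A rough standalone sketch, where the URL, token, and Athenz parameters are all placeholder values:

package main

import (
	"github.com/apache/pulsar-client-go/pulsar"
)

func main() {
	opt := pulsar.ClientOptions{URL: "pulsar+ssl://pulsar.example.com:6651"} // placeholder URL

	useToken := true
	if useToken {
		// Token auth: the JWT itself would come from a mounted secret volume.
		opt.Authentication = pulsar.NewAuthenticationToken("<jwt>") // placeholder token
	} else {
		// Athenz auth: params mirror AuthAthenzParams, with the private key
		// referenced as a file:// URI, as in the event source code above.
		params := map[string]string{
			"providerDomain": "example.provider",                      // placeholder
			"tenantDomain":   "example.tenant",                        // placeholder
			"tenantService":  "example.service",                       // placeholder
			"ztsUrl":         "https://zts.example.com:8443/zts/v1",   // placeholder
			"privateKey":     "file:///etc/athenz/key",                // placeholder
		}
		opt.Authentication = pulsar.NewAuthenticationAthenz(params)
	}

	client, err := pulsar.NewClient(opt)
	if err != nil {
		panic(err)
	}
	defer client.Close()
}
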
- clientCertPath = pulsarEventSource.TLS.DeprecatedClientCertPath - clientKeyPath = pulsarEventSource.TLS.DeprecatedClientKeyPath default: - return errors.New("invalid TLS config") + return fmt.Errorf("invalid TLS config") } clientOpt.Authentication = pulsar.NewAuthenticationTLS(clientCertPath, clientKeyPath) } var client pulsar.Client - if err := common.Connect(pulsarEventSource.ConnectionBackoff, func() error { + if err := common.DoWithRetry(pulsarEventSource.ConnectionBackoff, func() error { var err error if client, err = pulsar.NewClient(clientOpt); err != nil { return err } return nil }); err != nil { - return errors.Wrapf(err, "failed to connect to %s for event source %s", pulsarEventSource.URL, el.GetEventName()) + return fmt.Errorf("failed to connect to %s for event source %s, %w", pulsarEventSource.URL, el.GetEventName(), err) } log.Info("subscribing to messages on the topic...") consumer, err := client.Subscribe(consumerOpt) if err != nil { - return errors.Wrapf(err, "failed to connect to topic %+v for event source %s", pulsarEventSource.Topics, el.GetEventName()) + return fmt.Errorf("failed to connect to topic %+v for event source %s, %w", pulsarEventSource.Topics, el.GetEventName(), err) } consumeMessages: @@ -146,13 +165,17 @@ consumeMessages: case msg, ok := <-msgChannel: if !ok { log.Error("failed to read a message, channel might have been closed") - return errors.New("channel might have been closed") + return fmt.Errorf("channel might have been closed") } if err := el.handleOne(msg, dispatch, log); err != nil { log.Errorw("failed to process a Pulsar event", zap.Error(err)) el.Metrics.EventProcessingFailed(el.GetEventSourceName(), el.GetEventName()) } + + if err := consumer.Ack(msg.Message); err != nil { + return fmt.Errorf("failed to process a consumer ack, %w", err) + } case <-ctx.Done(): consumer.Close() client.Close() @@ -164,7 +187,7 @@ consumeMessages: return nil } -func (el *EventListener) handleOne(msg pulsar.Message, dispatch func([]byte) error, log *zap.SugaredLogger) error { +func (el *EventListener) handleOne(msg pulsar.Message, dispatch func([]byte, ...eventsourcecommon.Option) error, log *zap.SugaredLogger) error { defer func(start time.Time) { el.Metrics.EventProcessingDuration(el.GetEventSourceName(), el.GetEventName(), float64(time.Since(start)/time.Millisecond)) }(time.Now()) @@ -183,12 +206,12 @@ func (el *EventListener) handleOne(msg pulsar.Message, dispatch func([]byte) err eventBody, err := json.Marshal(eventData) if err != nil { - return errors.Wrap(err, "failed to marshal the event data. rejecting the event...") + return fmt.Errorf("failed to marshal the event data. 
rejecting the event, %w", err) } log.Infof("dispatching the message received on the topic %s to eventbus", msg.Topic()) if err = dispatch(eventBody); err != nil { - return errors.Wrap(err, "failed to dispatch a Pulsar event") + return fmt.Errorf("failed to dispatch a Pulsar event, %w", err) } return nil } diff --git a/eventsources/sources/pulsar/validate_test.go b/eventsources/sources/pulsar/validate_test.go index f2e5f41e9d..ba4184f157 100644 --- a/eventsources/sources/pulsar/validate_test.go +++ b/eventsources/sources/pulsar/validate_test.go @@ -18,7 +18,7 @@ package pulsar import ( "context" "fmt" - "io/ioutil" + "os" "testing" "github.com/argoproj/argo-events/eventsources/sources" @@ -34,7 +34,7 @@ func TestEventListener_ValidateEventSource(t *testing.T) { assert.Error(t, err) assert.Equal(t, "topics can't be empty list", err.Error()) - content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "pulsar.yaml")) + content, err := os.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "pulsar.yaml")) assert.Nil(t, err) var eventSource *v1alpha1.EventSource diff --git a/eventsources/sources/redis/start.go b/eventsources/sources/redis/start.go index 8e9a2dcf47..6f0ddbbca5 100644 --- a/eventsources/sources/redis/start.go +++ b/eventsources/sources/redis/start.go @@ -19,14 +19,15 @@ package redis import ( "context" "encoding/json" + "fmt" "time" - "github.com/go-redis/redis" - "github.com/pkg/errors" + "github.com/go-redis/redis/v8" "go.uber.org/zap" "github.com/argoproj/argo-events/common" "github.com/argoproj/argo-events/common/logging" + eventsourcecommon "github.com/argoproj/argo-events/eventsources/common" "github.com/argoproj/argo-events/eventsources/sources" metrics "github.com/argoproj/argo-events/metrics" apicommon "github.com/argoproj/argo-events/pkg/apis/common" @@ -58,7 +59,7 @@ func (el *EventListener) GetEventSourceType() apicommon.EventSourceType { } // StartListening listens events published by redis -func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte) error) error { +func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error) error { log := logging.FromContext(ctx). 
With(logging.LabelEventSourceType, el.GetEventSourceType(), logging.LabelEventName, el.GetEventName()) log.Info("started processing the Redis event source...") @@ -75,15 +76,19 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt if redisEventSource.Password != nil { password, err := common.GetSecretFromVolume(redisEventSource.Password) if err != nil { - return errors.Wrapf(err, "failed to find the secret password %s", redisEventSource.Password.Name) + return fmt.Errorf("failed to find the secret password %s, %w", redisEventSource.Password.Name, err) } opt.Password = password } + if redisEventSource.Username != "" { + opt.Username = redisEventSource.Username + } + if redisEventSource.TLS != nil { tlsConfig, err := common.GetTLSConfig(redisEventSource.TLS) if err != nil { - return errors.Wrap(err, "failed to get the tls configuration") + return fmt.Errorf("failed to get the tls configuration, %w", err) } opt.TLSConfig = tlsConfig } @@ -91,14 +96,14 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt log.Info("setting up a redis client...") client := redis.NewClient(opt) - if status := client.Ping(); status.Err() != nil { - return errors.Wrapf(status.Err(), "failed to connect to host %s and db %d for event source %s", redisEventSource.HostAddress, redisEventSource.DB, el.GetEventName()) + if status := client.Ping(ctx); status.Err() != nil { + return fmt.Errorf("failed to connect to host %s and db %d for event source %s, %w", redisEventSource.HostAddress, redisEventSource.DB, el.GetEventName(), status.Err()) } - pubsub := client.Subscribe(redisEventSource.Channels...) + pubsub := client.Subscribe(ctx, redisEventSource.Channels...) // Wait for confirmation that subscription is created before publishing anything. - if _, err := pubsub.Receive(); err != nil { - return errors.Wrapf(err, "failed to receive the subscription confirmation for event source %s", el.GetEventName()) + if _, err := pubsub.Receive(ctx); err != nil { + return fmt.Errorf("failed to receive the subscription confirmation for event source %s, %w", el.GetEventName(), err) } // Go channel which receives messages. @@ -108,7 +113,7 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt case message, ok := <-ch: if !ok { log.Error("failed to read a message, channel might have been closed") - return errors.New("channel might have been closed") + return fmt.Errorf("channel might have been closed") } if err := el.handleOne(message, dispatch, log); err != nil { @@ -117,7 +122,7 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt } case <-ctx.Done(): log.Info("event source is stopped. 
unsubscribing the subscription") - if err := pubsub.Unsubscribe(redisEventSource.Channels...); err != nil { + if err := pubsub.Unsubscribe(ctx, redisEventSource.Channels...); err != nil { log.Errorw("failed to unsubscribe", zap.Error(err)) } return nil @@ -125,7 +130,7 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt } } -func (el *EventListener) handleOne(message *redis.Message, dispatch func([]byte) error, log *zap.SugaredLogger) error { +func (el *EventListener) handleOne(message *redis.Message, dispatch func([]byte, ...eventsourcecommon.Option) error, log *zap.SugaredLogger) error { defer func(start time.Time) { el.Metrics.EventProcessingDuration(el.GetEventSourceName(), el.GetEventName(), float64(time.Since(start)/time.Millisecond)) }(time.Now()) @@ -137,13 +142,18 @@ func (el *EventListener) handleOne(message *redis.Message, dispatch func([]byte) Body: message.Payload, Metadata: el.RedisEventSource.Metadata, } + if el.RedisEventSource.JSONBody { + body := []byte(message.Payload) + eventData.Body = (*json.RawMessage)(&body) + } + eventBody, err := json.Marshal(&eventData) if err != nil { - return errors.Wrap(err, "failed to marshal the event data, rejecting the event...") + return fmt.Errorf("failed to marshal the event data, rejecting the event, %w", err) } - log.With("channel", message.Channel).Info("dispatching th event on the data channel...") + log.With("channel", message.Channel).Info("dispatching the event on the data channel...") if err = dispatch(eventBody); err != nil { - return errors.Wrap(err, "failed dispatch a Redis event") + return fmt.Errorf("failed dispatch a Redis event, %w", err) } return nil } diff --git a/eventsources/sources/redis/start_test.go b/eventsources/sources/redis/start_test.go new file mode 100644 index 0000000000..9c3dcaa399 --- /dev/null +++ b/eventsources/sources/redis/start_test.go @@ -0,0 +1,57 @@ +package redis + +import ( + "encoding/json" + "testing" + + "github.com/go-redis/redis/v8" + "github.com/stretchr/testify/assert" + "go.uber.org/zap/zaptest" + + eventsourcecommon "github.com/argoproj/argo-events/eventsources/common" + metrics "github.com/argoproj/argo-events/metrics" + "github.com/argoproj/argo-events/pkg/apis/events" + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" +) + +func Test_HandleOne(t *testing.T) { + el := &EventListener{ + EventSourceName: "esName", + EventName: "eName", + RedisEventSource: v1alpha1.RedisEventSource{}, + Metrics: metrics.NewMetrics("ns"), + } + + msg := &redis.Message{ + Channel: "ch", + Pattern: "p", + Payload: `{"a": "b"}`, + } + + getDispatcher := func(isJson bool) func(d []byte, opts ...eventsourcecommon.Option) error { + return func(d []byte, opts ...eventsourcecommon.Option) error { + eventData := &events.RedisEventData{} + err := json.Unmarshal(d, eventData) + assert.NoError(t, err) + assert.Equal(t, msg.Pattern, eventData.Pattern) + assert.Equal(t, msg.Channel, eventData.Channel) + if !isJson { + s, ok := eventData.Body.(string) + assert.True(t, ok) + assert.Equal(t, msg.Payload, s) + } else { + s, ok := eventData.Body.(map[string]interface{}) + assert.True(t, ok) + assert.Equal(t, "b", s["a"]) + } + return nil + } + } + + err := el.handleOne(msg, getDispatcher(el.RedisEventSource.JSONBody), zaptest.NewLogger(t).Sugar()) + assert.NoError(t, err) + + el.RedisEventSource.JSONBody = true + err = el.handleOne(msg, getDispatcher(el.RedisEventSource.JSONBody), zaptest.NewLogger(t).Sugar()) + assert.NoError(t, err) +} diff --git 
a/eventsources/sources/redis/validate.go b/eventsources/sources/redis/validate.go index 840ac821ec..53f2f1cb26 100644 --- a/eventsources/sources/redis/validate.go +++ b/eventsources/sources/redis/validate.go @@ -17,8 +17,7 @@ package redis import ( "context" - - "github.com/pkg/errors" + "fmt" "github.com/argoproj/argo-events/common" apicommon "github.com/argoproj/argo-events/pkg/apis/common" @@ -35,13 +34,13 @@ func validate(eventSource *v1alpha1.RedisEventSource) error { return common.ErrNilEventSource } if eventSource.HostAddress == "" { - return errors.New("host address must be specified") + return fmt.Errorf("host address must be specified") } if eventSource.Channels == nil { - return errors.New("channel/s must be specified") + return fmt.Errorf("channel/s must be specified") } if eventSource.Password != nil && eventSource.Namespace == "" { - return errors.New("namespace must be defined in order to retrieve the password from the secret") + return fmt.Errorf("namespace must be defined in order to retrieve the password from the secret") } if eventSource.TLS != nil { return apicommon.ValidateTLSConfig(eventSource.TLS) diff --git a/eventsources/sources/redis/validate_test.go b/eventsources/sources/redis/validate_test.go index 177350366a..8972dfda79 100644 --- a/eventsources/sources/redis/validate_test.go +++ b/eventsources/sources/redis/validate_test.go @@ -19,7 +19,7 @@ package redis import ( "context" "fmt" - "io/ioutil" + "os" "testing" "github.com/argoproj/argo-events/eventsources/sources" @@ -35,7 +35,7 @@ func TestValidateRedisEventSource(t *testing.T) { assert.Error(t, err) assert.Equal(t, "host address must be specified", err.Error()) - content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "redis.yaml")) + content, err := os.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "redis.yaml")) assert.Nil(t, err) var eventSource *v1alpha1.EventSource diff --git a/eventsources/sources/redis_stream/start.go b/eventsources/sources/redis_stream/start.go new file mode 100644 index 0000000000..df35f3437b --- /dev/null +++ b/eventsources/sources/redis_stream/start.go @@ -0,0 +1,218 @@ +/* +Copyright 2020 BlackRock, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package redisstream + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "github.com/go-redis/redis/v8" + "go.uber.org/zap" + + "github.com/argoproj/argo-events/common" + "github.com/argoproj/argo-events/common/logging" + eventsourcecommon "github.com/argoproj/argo-events/eventsources/common" + "github.com/argoproj/argo-events/eventsources/sources" + metrics "github.com/argoproj/argo-events/metrics" + apicommon "github.com/argoproj/argo-events/pkg/apis/common" + "github.com/argoproj/argo-events/pkg/apis/events" + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" +) + +// EventListener implements Eventing for the Redis event source +type EventListener struct { + EventSourceName string + EventName string + EventSource v1alpha1.RedisStreamEventSource + Metrics *metrics.Metrics +} + +// GetEventSourceName returns name of event source +func (el *EventListener) GetEventSourceName() string { + return el.EventSourceName +} + +// GetEventName returns name of event +func (el *EventListener) GetEventName() string { + return el.EventName +} + +// GetEventSourceType return type of event server +func (el *EventListener) GetEventSourceType() apicommon.EventSourceType { + return apicommon.RedisStreamEvent +} + +// StartListening listens for new data on specified redis streams +func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error) error { + log := logging.FromContext(ctx). + With(logging.LabelEventSourceType, el.GetEventSourceType(), logging.LabelEventName, el.GetEventName()) + log.Info("started processing the Redis stream event source...") + defer sources.Recover(el.GetEventName()) + + redisEventSource := &el.EventSource + + opt := &redis.Options{ + Addr: redisEventSource.HostAddress, + DB: int(redisEventSource.DB), + } + + log.Info("retrieving password if it has been configured...") + if redisEventSource.Password != nil { + password, err := common.GetSecretFromVolume(redisEventSource.Password) + if err != nil { + return fmt.Errorf("failed to find the secret password %s, %w", redisEventSource.Password.Name, err) + } + opt.Password = password + } + + if redisEventSource.Username != "" { + opt.Username = redisEventSource.Username + } + + if redisEventSource.TLS != nil { + tlsConfig, err := common.GetTLSConfig(redisEventSource.TLS) + if err != nil { + return fmt.Errorf("failed to get the tls configuration, %w", err) + } + opt.TLSConfig = tlsConfig + } + + log.Infof("setting up a redis client for %s...", redisEventSource.HostAddress) + client := redis.NewClient(opt) + + if status := client.Ping(ctx); status.Err() != nil { + return fmt.Errorf("failed to connect to host %s and db %d for event source %s, %w", redisEventSource.HostAddress, redisEventSource.DB, el.GetEventName(), status.Err()) + } + log.Infof("connected to redis server %s", redisEventSource.HostAddress) + + // Create a common consumer group on all streams to start reading from beginning of the streams. 
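For context on the consumer-group pattern used below: the listener creates the group at the stream tail with XGROUP CREATE (tolerating BUSYGROUP), reads batches with XREADGROUP, and acknowledges handled entries with XACK. A condensed, hedged sketch of that flow against a single stream; the address and the stream, group, and consumer names are placeholders:

package main

import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/go-redis/redis/v8"
)

func consume(ctx context.Context, client *redis.Client) error {
	// "$" starts the group at the current tail of the stream; BUSYGROUP
	// only means the group already exists and is safe to ignore.
	err := client.XGroupCreate(ctx, "my-stream", "my-group", "$").Err()
	if err != nil && !strings.HasPrefix(err.Error(), "BUSYGROUP") {
		return err
	}
	for {
		// ">" asks for messages never delivered to this group; replaying a
		// consumer's own pending entries would use an explicit ID like "0".
		res, err := client.XReadGroup(ctx, &redis.XReadGroupArgs{
			Group:    "my-group",
			Consumer: "worker-1",
			Streams:  []string{"my-stream", ">"},
			Count:    10,
			Block:    2 * time.Second,
		}).Result()
		if err == redis.Nil {
			continue // nothing arrived within the block window
		} else if err != nil {
			return err
		}
		for _, stream := range res {
			for _, msg := range stream.Messages {
				fmt.Println(msg.ID, msg.Values)
				// XACK removes the entry from the pending entries list.
				if err := client.XAck(ctx, "my-stream", "my-group", msg.ID).Err(); err != nil {
					fmt.Println("ack failed:", err)
				}
			}
		}
	}
}

func main() {
	client := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // placeholder address
	if err := consume(context.Background(), client); err != nil {
		panic(err)
	}
}
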
+ // Only proceeds if all the streams are already present + consumersGroup := "argo-events-cg" + if len(redisEventSource.ConsumerGroup) != 0 { + consumersGroup = redisEventSource.ConsumerGroup + } + for _, stream := range redisEventSource.Streams { + // create a consumer group to start reading from the current last entry in the stream (https://redis.io/commands/xgroup-create) + if err := client.XGroupCreate(ctx, stream, consumersGroup, "$").Err(); err != nil { + // redis package doesn't seem to expose concrete error types + if err.Error() != "BUSYGROUP Consumer Group name already exists" { + return fmt.Errorf("creating consumer group %s for stream %s on host %s for event source %s, %w", consumersGroup, stream, redisEventSource.HostAddress, el.GetEventName(), err) + } + log.Infof("Consumer group %q already exists in stream %q", consumersGroup, stream) + } + } + + readGroupArgs := make([]string, 2*len(redisEventSource.Streams)) + copy(readGroupArgs, redisEventSource.Streams) + // Start by reading our pending messages(previously read but not acknowledged). + streamToLastEntryMapping := make(map[string]string, len(redisEventSource.Streams)) + for _, s := range redisEventSource.Streams { + streamToLastEntryMapping[s] = "0-0" + } + + updateReadGroupArgs := func() { + for i, s := range redisEventSource.Streams { + readGroupArgs[i+len(redisEventSource.Streams)] = streamToLastEntryMapping[s] + } + } + updateReadGroupArgs() + + msgCount := redisEventSource.MaxMsgCountPerRead + if msgCount == 0 { + msgCount = 10 + } + + var msgsToAcknowledge []string + for { + select { + case <-ctx.Done(): + log.Infof("Redis stream event source for host %s is stopped", redisEventSource.HostAddress) + return nil + default: + } + entries, err := client.XReadGroup(ctx, &redis.XReadGroupArgs{ + Group: consumersGroup, + Consumer: "argo-events-worker", + Streams: readGroupArgs, + Count: int64(msgCount), + Block: 2 * time.Second, + NoAck: false, + }).Result() + if err != nil { + if err == redis.Nil { + continue + } + log.With("streams", redisEventSource.Streams).Errorw("reading streams using XREADGROUP", zap.Error(err)) + } + + for _, entry := range entries { + if len(entry.Messages) == 0 { + // Completed consuming pending messages. Now start consuming new messages + streamToLastEntryMapping[entry.Stream] = ">" + } + + msgsToAcknowledge = msgsToAcknowledge[:0] + + for _, message := range entry.Messages { + if err := el.handleOne(entry.Stream, message, dispatch, log); err != nil { + log.With("stream", entry.Stream, "message_id", message.ID).Errorw("failed to process Redis stream message", zap.Error(err)) + el.Metrics.EventProcessingFailed(el.GetEventSourceName(), el.GetEventName()) + continue + } + msgsToAcknowledge = append(msgsToAcknowledge, message.ID) + } + + if len(msgsToAcknowledge) == 0 { + continue + } + + // Even if acknowledging fails, since we handled the message, we are good to proceed. 
+			if err := client.XAck(ctx, entry.Stream, consumersGroup, msgsToAcknowledge...).Err(); err != nil {
+				log.With("stream", entry.Stream, "message_ids", msgsToAcknowledge).Errorw("failed to acknowledge messages from the Redis stream", zap.Error(err))
+			}
+			if streamToLastEntryMapping[entry.Stream] != ">" {
+				streamToLastEntryMapping[entry.Stream] = msgsToAcknowledge[len(msgsToAcknowledge)-1]
+			}
+		}
+		updateReadGroupArgs()
+	}
+}
+
+func (el *EventListener) handleOne(stream string, message redis.XMessage, dispatch func([]byte, ...eventsourcecommon.Option) error, log *zap.SugaredLogger) error {
+	defer func(start time.Time) {
+		el.Metrics.EventProcessingDuration(el.GetEventSourceName(), el.GetEventName(), float64(time.Since(start)/time.Millisecond))
+	}(time.Now())
+
+	log.With("stream", stream, "message_id", message.ID).Info("received a message")
+	eventData := &events.RedisStreamEventData{
+		Stream:   stream,
+		Id:       message.ID,
+		Values:   message.Values,
+		Metadata: el.EventSource.Metadata,
+	}
+	eventBody, err := json.Marshal(&eventData)
+	if err != nil {
+		return fmt.Errorf("failed to marshal the event data, rejecting the event, %w", err)
+	}
+	log.With("stream", stream).Info("dispatching the event on the data channel...")
+	if err = dispatch(eventBody); err != nil {
+		return fmt.Errorf("failed to dispatch a Redis stream event, %w", err)
+	}
+	return nil
+}
diff --git a/eventsources/sources/redis_stream/validate.go b/eventsources/sources/redis_stream/validate.go
new file mode 100644
index 0000000000..d8cab86b6a
--- /dev/null
+++ b/eventsources/sources/redis_stream/validate.go
@@ -0,0 +1,46 @@
+/*
+Copyright 2020 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package redisstream
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/argoproj/argo-events/common"
+	apicommon "github.com/argoproj/argo-events/pkg/apis/common"
+	"github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1"
+)
+
+// ValidateEventSource validates the redis stream event source
+func (el *EventListener) ValidateEventSource(ctx context.Context) error {
+	return validate(&el.EventSource)
+}
+
+func validate(eventSource *v1alpha1.RedisStreamEventSource) error {
+	if eventSource == nil {
+		return common.ErrNilEventSource
+	}
+	if eventSource.HostAddress == "" {
+		return fmt.Errorf("host address must be specified")
+	}
+	if eventSource.Streams == nil {
+		return fmt.Errorf("stream/streams must be specified")
+	}
+	if eventSource.TLS != nil {
+		return apicommon.ValidateTLSConfig(eventSource.TLS)
+	}
+	return nil
+}
diff --git a/eventsources/sources/redis_stream/validate_test.go b/eventsources/sources/redis_stream/validate_test.go
new file mode 100644
index 0000000000..4c400d49e5
--- /dev/null
+++ b/eventsources/sources/redis_stream/validate_test.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2018 BlackRock, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package redisstream + +import ( + "context" + "fmt" + "os" + "testing" + + "github.com/argoproj/argo-events/eventsources/sources" + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" + "github.com/ghodss/yaml" + "github.com/stretchr/testify/assert" +) + +func TestValidateRedisEventSource(t *testing.T) { + listener := &EventListener{} + + err := listener.ValidateEventSource(context.Background()) + assert.Error(t, err) + assert.Equal(t, "host address must be specified", err.Error()) + + content, err := os.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "redis-streams.yaml")) + assert.Nil(t, err) + + var eventSource *v1alpha1.EventSource + err = yaml.Unmarshal(content, &eventSource) + assert.Nil(t, err) + assert.NotNil(t, eventSource.Spec.RedisStream) + + for name, value := range eventSource.Spec.RedisStream { + fmt.Println(name) + l := &EventListener{ + EventSource: value, + } + err := l.ValidateEventSource(context.Background()) + assert.NoError(t, err) + } +} diff --git a/eventsources/sources/resource/start.go b/eventsources/sources/resource/start.go index 59ee7c2f32..d0a16a130f 100644 --- a/eventsources/sources/resource/start.go +++ b/eventsources/sources/resource/start.go @@ -19,12 +19,12 @@ package resource import ( "context" "encoding/json" + "fmt" "os" "regexp" "strings" "time" - "github.com/pkg/errors" "github.com/tidwall/gjson" "go.uber.org/zap" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -34,15 +34,18 @@ import ( "k8s.io/apimachinery/pkg/selection" "k8s.io/client-go/dynamic" "k8s.io/client-go/dynamic/dynamicinformer" + "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" "github.com/argoproj/argo-events/common" "github.com/argoproj/argo-events/common/logging" + eventsourcecommon "github.com/argoproj/argo-events/eventsources/common" "github.com/argoproj/argo-events/eventsources/sources" metrics "github.com/argoproj/argo-events/metrics" apicommon "github.com/argoproj/argo-events/pkg/apis/common" "github.com/argoproj/argo-events/pkg/apis/events" "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" + "github.com/danielm-codefresh/argo-multi-cluster/pkg/clusterauth" ) // InformerEvent holds event generated from resource state change @@ -76,7 +79,7 @@ func (el *EventListener) GetEventSourceType() apicommon.EventSourceType { } // StartListening watches resource updates and consume those events -func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte) error) error { +func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error) error { log := logging.FromContext(ctx). 
With(logging.LabelEventSourceType, el.GetEventSourceType(), logging.LabelEventName, el.GetEventName()) defer sources.Recover(el.GetEventName()) @@ -85,15 +88,35 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt kubeConfig, _ := os.LookupEnv(common.EnvVarKubeConfig) restConfig, err := common.GetClientConfig(kubeConfig) if err != nil { - return errors.Wrapf(err, "failed to get a K8s rest config for the event source %s", el.GetEventName()) + return fmt.Errorf("failed to get a K8s rest config for the event source %s, %w", el.GetEventName(), err) } + + resourceEventSource := &el.ResourceEventSource + + if resourceEventSource.Cluster != "" { + clientset, err := kubernetes.NewForConfig(restConfig) + if err != nil { + return err + } + + secret, err := clusterauth.GetClusterSecret(clientset, resourceEventSource.Cluster) + if err != nil { + return err + } + + cluster, err := clusterauth.SecretToCluster(*secret) + if err != nil { + return err + } + + restConfig = cluster.RESTConfig() + } + client, err := dynamic.NewForConfig(restConfig) if err != nil { - return errors.Wrapf(err, "failed to set up a dynamic K8s client for the event source %s", el.GetEventName()) + return fmt.Errorf("failed to set up a dynamic K8s client for the event source %s, %w", el.GetEventName(), err) } - resourceEventSource := &el.ResourceEventSource - gvr := schema.GroupVersionResource{ Group: resourceEventSource.Group, Version: resourceEventSource.Version, @@ -108,7 +131,7 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt if resourceEventSource.Filter != nil && resourceEventSource.Filter.Labels != nil { sel, err := LabelSelector(resourceEventSource.Filter.Labels) if err != nil { - return errors.Wrapf(err, "failed to create the label selector for the event source %s", el.GetEventName()) + return fmt.Errorf("failed to create the label selector for the event source %s, %w", el.GetEventName(), err) } options.LabelSelector = sel.String() } @@ -137,14 +160,14 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt objBody, err := json.Marshal(event.Obj) if err != nil { - return errors.Wrap(err, "failed to marshal the resource, rejecting the event...") + return fmt.Errorf("failed to marshal the resource, rejecting the event, %w", err) } var oldObjBody []byte if event.OldObj != nil { oldObjBody, err = json.Marshal(event.OldObj) if err != nil { - return errors.Wrap(err, "failed to marshal the resource, rejecting the event...") + return fmt.Errorf("failed to marshal the resource, rejecting the event, %w", err) } } @@ -156,15 +179,16 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt Version: resourceEventSource.Version, Resource: resourceEventSource.Resource, Metadata: resourceEventSource.Metadata, + Cluster: resourceEventSource.Cluster, } eventBody, err := json.Marshal(eventData) if err != nil { - return errors.Wrap(err, "failed to marshal the event. rejecting the event...") + return fmt.Errorf("failed to marshal the event. 
rejecting the event, %w", err) } if err = dispatch(eventBody); err != nil { - return errors.Wrap(err, "failed to dispatch a resource event") + return fmt.Errorf("failed to dispatch a resource event, %w", err) } return nil } @@ -191,6 +215,7 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt case v1alpha1.ADD: handlerFuncs.AddFunc = func(obj interface{}) { log.Info("detected create event") + logResourceMetadata(obj.(*unstructured.Unstructured), log) informerEventCh <- &InformerEvent{ Obj: obj, Type: v1alpha1.ADD, @@ -199,6 +224,13 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt case v1alpha1.UPDATE: handlerFuncs.UpdateFunc = func(oldObj, newObj interface{}) { log.Info("detected update event") + logResourceMetadata(newObj.(*unstructured.Unstructured), log) + uNewObj := newObj.(*unstructured.Unstructured) + uOldObj := oldObj.(*unstructured.Unstructured) + if uNewObj.GetResourceVersion() == uOldObj.GetResourceVersion() { + log.Infof("rejecting update event with identical resource versions: %s", uNewObj.GetResourceVersion()) + return + } informerEventCh <- &InformerEvent{ Obj: newObj, OldObj: oldObj, @@ -208,6 +240,7 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt case v1alpha1.DELETE: handlerFuncs.DeleteFunc = func(obj interface{}) { log.Info("detected delete event") + logResourceMetadata(obj.(*unstructured.Unstructured), log) informerEventCh <- &InformerEvent{ Obj: obj, Type: v1alpha1.DELETE, @@ -215,12 +248,14 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt } default: stopCh <- struct{}{} - return errors.Errorf("unknown event type: %s", string(eventType)) + return fmt.Errorf("unknown event type: %s", string(eventType)) } } sharedInformer := informer.Informer() - sharedInformer.AddEventHandler(handlerFuncs) + if _, err := sharedInformer.AddEventHandler(handlerFuncs); err != nil { + return fmt.Errorf("failed to add event handler, %w", err) + } doneCh := make(chan struct{}) @@ -243,7 +278,16 @@ func LabelReq(sel v1alpha1.Selector) (*labels.Requirement, error) { if sel.Operation != "" { op = selection.Operator(sel.Operation) } - req, err := labels.NewRequirement(sel.Key, op, []string{sel.Value}) + var values []string + switch { + case (op == selection.Exists || op == selection.DoesNotExist) && sel.Value == "": + values = []string{} + case op == selection.In || op == selection.NotIn: + values = strings.Split(sel.Value, ",") + default: + values = []string{sel.Value} + } + req, err := labels.NewRequirement(sel.Key, op, values) if err != nil { return nil, err } @@ -348,3 +392,17 @@ func getEventTime(obj *unstructured.Unstructured, eventType v1alpha1.ResourceEve return obj.GetCreationTimestamp() } } + +func logResourceMetadata(uObj *unstructured.Unstructured, log *zap.SugaredLogger) { + kind := uObj.GetKind() + // ATM we want to log additional metadata only for workflows + if kind != "Workflow" { + return + } + + uid := uObj.GetUID() + name := uObj.GetName() + namespace := uObj.GetNamespace() + generation := uObj.GetGeneration() + log.Infof("handling resource: %s, name: %s, namespace: %s, uid: %s, generation: %d", kind, name, namespace, uid, generation) +} diff --git a/eventsources/sources/resource/start_test.go b/eventsources/sources/resource/start_test.go index a57564ad74..8434863e7e 100644 --- a/eventsources/sources/resource/start_test.go +++ b/eventsources/sources/resource/start_test.go @@ -19,6 +19,7 @@ package resource import ( "context" "encoding/json" + 
"fmt" "testing" "time" @@ -26,6 +27,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/kubernetes/fake" "github.com/argoproj/argo-events/common/logging" @@ -105,3 +107,174 @@ func TestFilter(t *testing.T) { convey.So(pass, convey.ShouldBeTrue) }) } + +func TestLabelSelector(t *testing.T) { + // Test equality operators =, == and in + for _, op := range []string{"==", "=", "in"} { + t.Run(fmt.Sprintf("Test operator %v", op), func(t *testing.T) { + r, err := LabelSelector([]v1alpha1.Selector{{ + Key: "key", + Operation: op, + Value: "1", + }}) + if err != nil { + t.Fatal(err) + } + validL := &labels.Set{"key": "1"} + if !r.Matches(validL) { + t.Errorf("didnot match %v", validL) + } + invalidL := &labels.Set{"key": "2"} + if r.Matches(invalidL) { + t.Errorf("matched %v", invalidL) + } + }) + } + // Test inequality operators != and notin + for _, op := range []string{"!=", "notin"} { + t.Run(fmt.Sprintf("Test operator %v", op), func(t *testing.T) { + r, err := LabelSelector([]v1alpha1.Selector{{ + Key: "key", + Operation: op, + Value: "1", + }}) + if err != nil { + t.Fatal(err) + } + validL := &labels.Set{"key": "2"} + if !r.Matches(validL) { + t.Errorf("didnot match %v", validL) + } + invalidL := &labels.Set{"key": "1"} + if r.Matches(invalidL) { + t.Errorf("matched %v", invalidL) + } + }) + } + // Test greater than operator + t.Run("Test operator gt", func(t *testing.T) { + r, err := LabelSelector([]v1alpha1.Selector{{ + Key: "key", + Operation: "gt", + Value: "1", + }}) + if err != nil { + t.Fatal(err) + } + validL := &labels.Set{"key": "2"} + if !r.Matches(validL) { + t.Errorf("didnot match %v", validL) + } + invalidL := &labels.Set{"key": "1"} + if r.Matches(invalidL) { + t.Errorf("matched %v", invalidL) + } + }) + // Test lower than operator + t.Run("Test operator lt", func(t *testing.T) { + r, err := LabelSelector([]v1alpha1.Selector{{ + Key: "key", + Operation: "lt", + Value: "2", + }}) + if err != nil { + t.Fatal(err) + } + validL := &labels.Set{"key": "1"} + if !r.Matches(validL) { + t.Errorf("didnot match %v", validL) + } + invalidL := &labels.Set{"key": "2"} + if r.Matches(invalidL) { + t.Errorf("matched %v", invalidL) + } + }) + // Test exists operator + t.Run("Test operator exists", func(t *testing.T) { + r, err := LabelSelector([]v1alpha1.Selector{{ + Key: "key", + Operation: "exists", + }}) + if err != nil { + t.Fatal(err) + } + validL := &labels.Set{"key": "something"} + if !r.Matches(validL) { + t.Errorf("didnot match %v", validL) + } + invalidL := &labels.Set{"notkey": "something"} + if r.Matches(invalidL) { + t.Errorf("matched %v", invalidL) + } + }) + // Test doesnot exist operator + t.Run("Test operator !", func(t *testing.T) { + r, err := LabelSelector([]v1alpha1.Selector{{ + Key: "key", + Operation: "!", + }}) + if err != nil { + t.Fatal(err) + } + validL := &labels.Set{"notkey": "something"} + if !r.Matches(validL) { + t.Errorf("didnot match %v", validL) + } + invalidL := &labels.Set{"key": "something"} + if r.Matches(invalidL) { + t.Errorf("matched %v", invalidL) + } + }) + // Test default operator + t.Run("Test default operator", func(t *testing.T) { + r, err := LabelSelector([]v1alpha1.Selector{{ + Key: "key", + Operation: "", + Value: "something", + }}) + if err != nil { + t.Fatal(err) + } + validL := &labels.Set{"key": "something"} + if !r.Matches(validL) { + t.Errorf("didnot match %v", validL) + } + invalidL := 
&labels.Set{"key": "not something"} + if r.Matches(invalidL) { + t.Errorf("matched %v", invalidL) + } + }) + // Test invalid operators <= and >= + for _, op := range []string{"<=", ">="} { + t.Run(fmt.Sprintf("Invalid operator %v", op), func(t *testing.T) { + _, err := LabelSelector([]v1alpha1.Selector{{ + Key: "workflows.argoproj.io/phase", + Operation: op, + Value: "1", + }}) + if err == nil { + t.Errorf("Invalid operator should throw error") + } + }) + } + // Test comma separated values for in + t.Run("Comma separated values", func(t *testing.T) { + r, err := LabelSelector([]v1alpha1.Selector{{ + Key: "key", + Operation: "in", + Value: "a,b,", + }}) + if err != nil { + t.Fatal("valid value threw error, value %w", err) + } + for _, validL := range []labels.Set{{"key": "a"}, {"key": "b"}, {"key": ""}} { + if !r.Matches(validL) { + t.Errorf("didnot match %v", validL) + } + } + invalidL := &labels.Set{"key": "c"} + if r.Matches(invalidL) { + t.Errorf("matched %v", invalidL) + } + }) +} diff --git a/eventsources/sources/resource/validate_test.go b/eventsources/sources/resource/validate_test.go index 0cc7c90011..16e42c1f87 100644 --- a/eventsources/sources/resource/validate_test.go +++ b/eventsources/sources/resource/validate_test.go @@ -19,7 +19,7 @@ package resource import ( "context" "fmt" - "io/ioutil" + "os" "testing" "github.com/ghodss/yaml" @@ -36,7 +36,7 @@ func TestValidateEventSource(t *testing.T) { assert.Error(t, err) assert.Equal(t, "version must be specified", err.Error()) - content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "resource.yaml")) + content, err := os.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "resource.yaml")) assert.Nil(t, err) var eventSource *v1alpha1.EventSource diff --git a/eventsources/sources/sftp/start.go b/eventsources/sources/sftp/start.go new file mode 100644 index 0000000000..d8becd3ebe --- /dev/null +++ b/eventsources/sources/sftp/start.go @@ -0,0 +1,310 @@ +/* +Copyright 2018 BlackRock, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package sftp
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io/fs"
+	"os"
+	"regexp"
+	"strings"
+	"time"
+
+	"github.com/fsnotify/fsnotify"
+	"github.com/pkg/sftp"
+	"go.uber.org/zap"
+	"golang.org/x/crypto/ssh"
+
+	"github.com/argoproj/argo-events/common"
+	"github.com/argoproj/argo-events/common/logging"
+	eventsourcecommon "github.com/argoproj/argo-events/eventsources/common"
+	"github.com/argoproj/argo-events/eventsources/common/fsevent"
+	"github.com/argoproj/argo-events/eventsources/sources"
+	metrics "github.com/argoproj/argo-events/metrics"
+	apicommon "github.com/argoproj/argo-events/pkg/apis/common"
+	"github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1"
+)
+
+// EventListener implements Eventing for sftp event source
+type EventListener struct {
+	EventSourceName string
+	EventName       string
+	SFTPEventSource v1alpha1.SFTPEventSource
+	Metrics         *metrics.Metrics
+}
+
+// GetEventSourceName returns name of event source
+func (el *EventListener) GetEventSourceName() string {
+	return el.EventSourceName
+}
+
+// GetEventName returns name of event
+func (el *EventListener) GetEventName() string {
+	return el.EventName
+}
+
+// GetEventSourceType returns type of event server
+func (el *EventListener) GetEventSourceType() apicommon.EventSourceType {
+	return apicommon.SFTPEvent
+}
+
+// StartListening starts listening events
+func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error) error {
+	log := logging.FromContext(ctx).
+		With(logging.LabelEventSourceType, el.GetEventSourceType(), logging.LabelEventName, el.GetEventName())
+	defer sources.Recover(el.GetEventName())
+
+	username, err := common.GetSecretFromVolume(el.SFTPEventSource.Username)
+	if err != nil {
+		return fmt.Errorf("username not found, %w", err)
+	}
+	address, err := common.GetSecretFromVolume(el.SFTPEventSource.Address)
+	if err != nil {
+		return fmt.Errorf("address not found, %w", err)
+	}
+
+	var authMethod ssh.AuthMethod
+	var hostKeyCallback ssh.HostKeyCallback
+
+	if el.SFTPEventSource.SSHKeySecret != nil {
+		sshKeyPath, err := common.GetSecretVolumePath(el.SFTPEventSource.SSHKeySecret)
+		if err != nil {
+			return fmt.Errorf("failed to get SSH key from mounted volume, %w", err)
+		}
+		sshKey, err := os.ReadFile(sshKeyPath)
+		if err != nil {
+			return fmt.Errorf("failed to read ssh key file, %w", err)
+		}
+		signer, err := ssh.ParsePrivateKey(sshKey)
+		if err != nil {
+			return fmt.Errorf("failed to parse private ssh key, %w", err)
+		}
+		publicKey, _, _, _, err := ssh.ParseAuthorizedKey(sshKey)
+		if err != nil {
+			return fmt.Errorf("failed to parse public ssh key, %w", err)
+		}
+		authMethod = ssh.PublicKeys(signer)
+		hostKeyCallback = ssh.FixedHostKey(publicKey)
+	} else {
+		password, err := common.GetSecretFromVolume(el.SFTPEventSource.Password)
+		if err != nil {
+			return fmt.Errorf("password not found, %w", err)
+		}
+		authMethod = ssh.Password(password)
+		hostKeyCallback = ssh.InsecureIgnoreHostKey()
+	}
+
+	sftpConfig := &ssh.ClientConfig{
+		User:            username,
+		Auth:            []ssh.AuthMethod{authMethod},
+		HostKeyCallback: hostKeyCallback,
+	}
+
+	var sshClient *ssh.Client
+	err = common.DoWithRetry(nil, func() error {
+		var err error
+		sshClient, err = ssh.Dial("tcp", address, sftpConfig)
+		return err
+	})
+	if err != nil {
+		return fmt.Errorf("dialing sftp address %s: %w", address, err)
+	}
+
+	sftpClient, err := sftp.NewClient(sshClient)
+	if err != nil {
+		return fmt.Errorf("new sftp client: %w", err)
+	}
+	defer sftpClient.Close()
+
+	if err := el.listenEvents(ctx, sftpClient, dispatch, log); err != nil {
+		log.Error("failed to listen to events", zap.Error(err))
+		return err
+	}
+	return nil
+}
+
+// listenEvents listens to sftp related events.
+func (el *EventListener) listenEvents(ctx context.Context, sftpClient *sftp.Client, dispatch func([]byte, ...eventsourcecommon.Option) error, log *zap.SugaredLogger) error {
+	sftpEventSource := &el.SFTPEventSource
+
+	log.Info("identifying new files in sftp...")
+	startingFiles, err := sftpNonDirFiles(sftpClient, sftpEventSource.WatchPathConfig.Directory)
+	if err != nil {
+		return fmt.Errorf("failed to read directory %s for %s, %w", sftpEventSource.WatchPathConfig.Directory, el.GetEventName(), err)
+	}
+
+	// TODO: do we need some sort of stateful mechanism to capture changes between event source restarts?
+	// This would allow loading startingFiles from store/cache rather than initializing starting files from remote sftp source
+
+	var pathRegexp *regexp.Regexp
+	if sftpEventSource.WatchPathConfig.PathRegexp != "" {
+		log.Infow("matching file path with configured regex...", zap.Any("regex", sftpEventSource.WatchPathConfig.PathRegexp))
+		pathRegexp, err = regexp.Compile(sftpEventSource.WatchPathConfig.PathRegexp)
+		if err != nil {
+			return fmt.Errorf("failed to compile path regex %s for %s, %w", sftpEventSource.WatchPathConfig.PathRegexp, el.GetEventName(), err)
+		}
+	}
+
+	processOne := func(event fsnotify.Event) error {
+		defer func(start time.Time) {
+			el.Metrics.EventProcessingDuration(el.GetEventSourceName(), el.GetEventName(), float64(time.Since(start)/time.Millisecond))
+		}(time.Now())
+
+		log.Infow("sftp event", zap.Any("event-type", event.Op.String()), zap.Any("descriptor-name", event.Name))
+
+		fileEvent := fsevent.Event{Name: event.Name, Op: fsevent.NewOp(event.Op.String()), Metadata: el.SFTPEventSource.Metadata}
+		payload, err := json.Marshal(fileEvent)
+		if err != nil {
+			return fmt.Errorf("failed to marshal the event to the fs event, %w", err)
+		}
+		log.Infow("dispatching sftp event on data channel...", zap.Any("event-type", event.Op.String()), zap.Any("descriptor-name", event.Name))
+		if err = dispatch(payload); err != nil {
+			return fmt.Errorf("failed to dispatch an sftp event, %w", err)
+		}
+		return nil
+	}
+
+	maybeProcess := func(fi fs.FileInfo, op fsnotify.Op) error {
+		matched := false
+		relPath := strings.TrimPrefix(fi.Name(), sftpEventSource.WatchPathConfig.Directory)
+		if sftpEventSource.WatchPathConfig.Path != "" && sftpEventSource.WatchPathConfig.Path == relPath {
+			matched = true
+		} else if pathRegexp != nil && pathRegexp.MatchString(relPath) {
+			matched = true
+		}
+		if matched && sftpEventSource.EventType == op.String() {
+			if err := processOne(fsnotify.Event{
+				Name: fi.Name(),
+				Op:   op,
+			}); err != nil {
+				log.Errorw("failed to process an sftp event", zap.Error(err))
+				el.Metrics.EventProcessingFailed(el.GetEventSourceName(), el.GetEventName())
+			}
+		}
+
+		return nil
+	}
+
+	pollIntervalDuration := time.Second * 10
+	if d, err := time.ParseDuration(sftpEventSource.PollIntervalDuration); err == nil {
+		pollIntervalDuration = d
+	} else {
+		log.Errorw(fmt.Sprintf("failed to parse poll interval duration, falling back to %s", pollIntervalDuration.String()), zap.Error(err))
+	}
+
+	log.Infof("listening to sftp notifications... polling interval %s", pollIntervalDuration.String())
+	for {
+		select {
+		case <-time.After(pollIntervalDuration):
+
+			files, err := sftpNonDirFiles(sftpClient, sftpEventSource.WatchPathConfig.Directory)
+			if err != nil {
+				return fmt.Errorf("failed to read directory %s for %s, %w", sftpEventSource.WatchPathConfig.Directory, el.GetEventName(), err)
+			}
+
+			fileDiff := diffFiles(startingFiles, files)
+			if fileDiff.isEmpty() {
+				continue
+			}
+
+			log.Infof("found %d new files and %d removed files", len(fileDiff.new), len(fileDiff.removed))
+
+			for _, fi := range fileDiff.removed {
+				if err = maybeProcess(fi, fsnotify.Remove); err != nil {
+					log.Errorw("failed to process a file event", zap.Error(err))
+					el.Metrics.EventProcessingFailed(el.GetEventSourceName(), el.GetEventName())
+				}
+			}
+			for _, fi := range fileDiff.new {
+				if err = maybeProcess(fi, fsnotify.Create); err != nil {
+					log.Errorw("failed to process a file event", zap.Error(err))
+					el.Metrics.EventProcessingFailed(el.GetEventSourceName(), el.GetEventName())
+				}
+			}
+
+			// TODO: errors processing files will result in dropped events
+			// adjusting the logic for overwriting startingFiles could enable the next tick
+			// to capture the event
+			startingFiles = files
+
+		case <-ctx.Done():
+			log.Info("event source has been stopped")
+			return nil
+		}
+	}
+}
+
+func sftpNonDirFiles(sftpClient *sftp.Client, dir string) ([]fs.FileInfo, error) {
+	var files []fs.FileInfo
+	err := common.DoWithRetry(nil, func() error {
+		var err error
+		files, err = sftpClient.ReadDir(dir)
+		return err
+	})
+	if err != nil {
+		return nil, err
+	}
+	var nonDirFiles []fs.FileInfo
+	for _, f := range files {
+		if !f.IsDir() {
+			nonDirFiles = append(nonDirFiles, f)
+		}
+	}
+
+	return nonDirFiles, nil
+}
+
+type fileDiff struct {
+	new     []fs.FileInfo
+	removed []fs.FileInfo
+}
+
+func (f fileDiff) isEmpty() bool {
+	return (len(f.new) + len(f.removed)) == 0
+}
+
+func diffFiles(startingFiles, currentFiles []fs.FileInfo) fileDiff {
+	fileMap := make(map[string]fs.FileInfo)
+	for _, file := range currentFiles {
+		fileMap[file.Name()] = file
+	}
+
+	var diff fileDiff
+
+	for _, startingFile := range startingFiles {
+		name := startingFile.Name()
+
+		if newFile, ok := fileMap[name]; !ok {
+			diff.removed = append(diff.removed, startingFile)
+		} else {
+			delete(fileMap, name)
+
+			if newFile.Size() != startingFile.Size() || newFile.ModTime() != startingFile.ModTime() {
+				diff.new = append(diff.new, newFile)
+			}
+		}
+	}
+
+	for _, newFile := range fileMap {
+		diff.new = append(diff.new, newFile)
+	}
+
+	return diff
+}
diff --git a/eventsources/sources/sftp/validate.go b/eventsources/sources/sftp/validate.go
new file mode 100644
index 0000000000..da8df9b512
--- /dev/null
+++ b/eventsources/sources/sftp/validate.go
@@ -0,0 +1,41 @@
+/*
+Copyright 2018 BlackRock, Inc.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sftp + +import ( + "context" + "fmt" + + "github.com/argoproj/argo-events/common" + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" +) + +// ValidateEventSource validates sftp event source +func (listener *EventListener) ValidateEventSource(ctx context.Context) error { + return validate(&listener.SFTPEventSource) +} + +func validate(sftpEventSource *v1alpha1.SFTPEventSource) error { + if sftpEventSource == nil { + return common.ErrNilEventSource + } + if sftpEventSource.EventType == "" { + return fmt.Errorf("type must be specified") + } + err := sftpEventSource.WatchPathConfig.Validate() + return err +} diff --git a/eventsources/sources/sftp/validate_test.go b/eventsources/sources/sftp/validate_test.go new file mode 100644 index 0000000000..0f56f1b113 --- /dev/null +++ b/eventsources/sources/sftp/validate_test.go @@ -0,0 +1,54 @@ +/* +Copyright 2018 BlackRock, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package sftp + +import ( + "context" + "fmt" + "os" + "testing" + + "github.com/ghodss/yaml" + "github.com/stretchr/testify/assert" + + "github.com/argoproj/argo-events/eventsources/sources" + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" +) + +func TestValidateEventSource(t *testing.T) { + listener := &EventListener{} + + err := listener.ValidateEventSource(context.Background()) + assert.Error(t, err) + assert.Equal(t, "type must be specified", err.Error()) + + content, err := os.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "sftp.yaml")) + assert.Nil(t, err) + + var eventSource *v1alpha1.EventSource + err = yaml.Unmarshal(content, &eventSource) + assert.Nil(t, err) + assert.NotNil(t, eventSource.Spec.SFTP) + + for _, value := range eventSource.Spec.SFTP { + l := &EventListener{ + SFTPEventSource: value, + } + err := l.ValidateEventSource(context.Background()) + assert.NoError(t, err) + } +} diff --git a/eventsources/sources/slack/start.go b/eventsources/sources/slack/start.go index 996f81b228..c5393c342c 100644 --- a/eventsources/sources/slack/start.go +++ b/eventsources/sources/slack/start.go @@ -20,17 +20,18 @@ import ( "bytes" "context" "encoding/json" - "io/ioutil" + "fmt" + "io" "net/http" "time" - "github.com/pkg/errors" "github.com/slack-go/slack" "github.com/slack-go/slack/slackevents" "go.uber.org/zap" "github.com/argoproj/argo-events/common" "github.com/argoproj/argo-events/common/logging" + eventsourcecommon "github.com/argoproj/argo-events/eventsources/common" "github.com/argoproj/argo-events/eventsources/common/webhook" "github.com/argoproj/argo-events/eventsources/sources" metrics "github.com/argoproj/argo-events/metrics" @@ -158,7 +159,7 @@ func (rc *Router) HandleRoute(writer http.ResponseWriter, request *http.Request) } default: - err = errors.New("could not determine slack type from form parameters") + err = fmt.Errorf("could not determine slack type from form parameters") logger.Errorw("failed to determine type of slack post", zap.Error(err)) common.SendInternalErrorResponse(writer, err.Error()) route.Metrics.EventProcessingFailed(route.EventSourceName, route.EventName) @@ -209,21 +210,21 @@ func (rc *Router) handleEvent(request *http.Request) ([]byte, []byte, error) { var err error var response []byte var data []byte - body, err := getRequestBody(request) + body, err := rc.getRequestBody(request) if err != nil { - return data, response, errors.Wrap(err, "failed to fetch request body") + return data, response, fmt.Errorf("failed to fetch request body, %w", err) } eventsAPIEvent, err := slackevents.ParseEvent(json.RawMessage(body), slackevents.OptionVerifyToken(&slackevents.TokenComparator{VerificationToken: rc.token})) if err != nil { - return data, response, errors.Wrap(err, "failed to extract event") + return data, response, fmt.Errorf("failed to extract event, %w", err) } if eventsAPIEvent.Type == slackevents.URLVerification { var r *slackevents.ChallengeResponse err = json.Unmarshal(body, &r) if err != nil { - return data, response, errors.Wrap(err, "failed to verify the challenge") + return data, response, fmt.Errorf("failed to verify the challenge, %w", err) } response = []byte(r.Challenge) } @@ -231,7 +232,7 @@ func (rc *Router) handleEvent(request *http.Request) ([]byte, []byte, error) { if eventsAPIEvent.Type == slackevents.CallbackEvent { data, err = json.Marshal(&eventsAPIEvent.InnerEvent) if err != nil { - return data, response, errors.Wrap(err, "failed to marshal event data, rejecting the event...") + return data, response, 
fmt.Errorf("failed to marshal event data, rejecting the event, %w", err) } } @@ -243,12 +244,12 @@ func (rc *Router) handleInteraction(request *http.Request) ([]byte, error) { ie := &slack.InteractionCallback{} err := json.Unmarshal([]byte(payload), ie) if err != nil { - return nil, errors.Wrap(err, "failed to parse interaction event") + return nil, fmt.Errorf("failed to parse interaction event, %w", err) } data, err := json.Marshal(ie) if err != nil { - return nil, errors.Wrap(err, "failed to marshal action data") + return nil, fmt.Errorf("failed to marshal action data, %w", err) } return data, nil @@ -257,24 +258,24 @@ func (rc *Router) handleInteraction(request *http.Request) ([]byte, error) { func (rc *Router) handleSlashCommand(request *http.Request) ([]byte, error) { command, err := slack.SlashCommandParse(request) if err != nil { - return nil, errors.Wrap(err, "failed to parse command") + return nil, fmt.Errorf("failed to parse command, %w", err) } data, err := json.Marshal(command) if err != nil { - return nil, errors.Wrap(err, "failed to marshal command data") + return nil, fmt.Errorf("failed to marshal command data, %w", err) } return data, nil } -func getRequestBody(request *http.Request) ([]byte, error) { +func (rc *Router) getRequestBody(request *http.Request) ([]byte, error) { // Read request payload - body, err := ioutil.ReadAll(request.Body) + body, err := io.ReadAll(io.LimitReader(request.Body, rc.route.Context.GetMaxPayloadSize())) // Reset request.Body ReadCloser to prevent side-effect if re-read - request.Body = ioutil.NopCloser(bytes.NewBuffer(body)) + request.Body = io.NopCloser(bytes.NewBuffer(body)) if err != nil { - return nil, errors.Wrap(err, "failed to parse request body") + return nil, fmt.Errorf("failed to parse request body, %w", err) } return body, nil } @@ -288,30 +289,30 @@ func (rc *Router) verifyRequest(request *http.Request) error { if len(signingSecret) > 0 { sv, err := slack.NewSecretsVerifier(request.Header, signingSecret) if err != nil { - return errors.Wrap(err, "cannot create secrets verifier") + return fmt.Errorf("cannot create secrets verifier, %w", err) } // Read the request body - body, err := getRequestBody(request) + body, err := rc.getRequestBody(request) if err != nil { return err } _, err = sv.Write(body) if err != nil { - return errors.Wrap(err, "error writing body: cannot verify signature") + return fmt.Errorf("error writing body: cannot verify signature, %w", err) } err = sv.Ensure() if err != nil { - return errors.Wrap(err, "signature validation failed") + return fmt.Errorf("signature validation failed, %w", err) } } return nil } // StartListening starts an event source -func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte) error) error { +func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error) error { log := logging.FromContext(ctx). 
With(logging.LabelEventSourceType, el.GetEventSourceType(), logging.LabelEventName, el.GetEventName()) @@ -322,13 +323,13 @@ func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byt log.Info("retrieving the slack token...") token, err := common.GetSecretFromVolume(slackEventSource.Token) if err != nil { - return errors.Wrap(err, "failed to retrieve the token") + return fmt.Errorf("failed to retrieve the token, %w", err) } log.Info("retrieving the signing secret...") signingSecret, err := common.GetSecretFromVolume(slackEventSource.SigningSecret) if err != nil { - return errors.Wrap(err, "failed to retrieve the signing secret") + return fmt.Errorf("failed to retrieve the signing secret, %w", err) } route := webhook.NewRoute(slackEventSource.Webhook, log, el.GetEventSourceName(), el.GetEventName(), el.Metrics) diff --git a/eventsources/sources/slack/start_test.go b/eventsources/sources/slack/start_test.go index b402c8f618..e78a139e8c 100644 --- a/eventsources/sources/slack/start_test.go +++ b/eventsources/sources/slack/start_test.go @@ -22,7 +22,7 @@ import ( "crypto/sha256" "encoding/hex" "encoding/json" - "io/ioutil" + "io" "net/http" "strconv" "strings" @@ -64,7 +64,7 @@ func TestRouteActiveHandler(t *testing.T) { convey.So(err, convey.ShouldBeNil) convey.So(payload, convey.ShouldNotBeNil) router.HandleRoute(writer, &http.Request{ - Body: ioutil.NopCloser(bytes.NewReader(payload)), + Body: io.NopCloser(bytes.NewReader(payload)), }) convey.So(writer.HeaderStatus, convey.ShouldEqual, http.StatusInternalServerError) }) @@ -102,7 +102,7 @@ func TestSlackSignature(t *testing.T) { }() router.HandleRoute(writer, &http.Request{ - Body: ioutil.NopCloser(bytes.NewReader(payload)), + Body: io.NopCloser(bytes.NewReader(payload)), Header: h, Method: "POST", }) @@ -137,7 +137,7 @@ func TestInteractionHandler(t *testing.T) { router.HandleRoute(writer, &http.Request{ Method: http.MethodPost, Header: headers, - Body: ioutil.NopCloser(strings.NewReader(buf.String())), + Body: io.NopCloser(strings.NewReader(buf.String())), }) result := <-out convey.So(writer.HeaderStatus, convey.ShouldEqual, http.StatusOK) @@ -173,7 +173,7 @@ func TestSlackCommandHandler(t *testing.T) { router.HandleRoute(writer, &http.Request{ Method: http.MethodPost, Header: headers, - Body: ioutil.NopCloser(strings.NewReader(buf.String())), + Body: io.NopCloser(strings.NewReader(buf.String())), }) result := <-out convey.So(writer.HeaderStatus, convey.ShouldEqual, http.StatusOK) @@ -223,7 +223,7 @@ func TestEventHandler(t *testing.T) { }() router.HandleRoute(writer, &http.Request{ - Body: ioutil.NopCloser(bytes.NewBuffer(payload)), + Body: io.NopCloser(bytes.NewBuffer(payload)), }) convey.So(writer.HeaderStatus, convey.ShouldEqual, http.StatusInternalServerError) }) diff --git a/eventsources/sources/slack/validate_test.go b/eventsources/sources/slack/validate_test.go index fad759ecfa..712f5e4a9c 100644 --- a/eventsources/sources/slack/validate_test.go +++ b/eventsources/sources/slack/validate_test.go @@ -19,7 +19,7 @@ package slack import ( "context" "fmt" - "io/ioutil" + "os" "testing" "github.com/argoproj/argo-events/eventsources/sources" @@ -35,7 +35,7 @@ func TestValidateEventSource(t *testing.T) { assert.Error(t, err) assert.Equal(t, "token not provided", err.Error()) - content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "slack.yaml")) + content, err := os.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "slack.yaml")) assert.Nil(t, err) var eventSource *v1alpha1.EventSource diff 
--git a/eventsources/sources/storagegrid/start.go b/eventsources/sources/storagegrid/start.go index a37d774690..db6aa02c24 100644 --- a/eventsources/sources/storagegrid/start.go +++ b/eventsources/sources/storagegrid/start.go @@ -20,7 +20,7 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" "net/url" "strings" @@ -30,11 +30,11 @@ import ( "github.com/go-resty/resty/v2" "github.com/google/uuid" "github.com/joncalhoun/qson" - "github.com/pkg/errors" "go.uber.org/zap" "github.com/argoproj/argo-events/common" "github.com/argoproj/argo-events/common/logging" + eventsourcecommon "github.com/argoproj/argo-events/eventsources/common" "github.com/argoproj/argo-events/eventsources/common/webhook" "github.com/argoproj/argo-events/eventsources/sources" apicommon "github.com/argoproj/argo-events/pkg/apis/common" @@ -140,7 +140,8 @@ func (router *Router) HandleRoute(writer http.ResponseWriter, request *http.Requ } logger.Info("parsing the request body...") - body, err := ioutil.ReadAll(request.Body) + request.Body = http.MaxBytesReader(writer, request.Body, route.Context.GetMaxPayloadSize()) + body, err := io.ReadAll(request.Body) if err != nil { logger.Errorw("failed to parse request body", zap.Error(err)) common.SendErrorResponse(writer, "") @@ -213,7 +214,7 @@ func (router *Router) PostActivate() error { authToken, err := common.GetSecretFromVolume(eventSource.AuthToken) if err != nil { - return errors.Wrap(err, "AuthToken not found") + return fmt.Errorf("AuthToken not found, %w", err) } registrationURL := common.FormattedURL(eventSource.Webhook.URL, eventSource.Webhook.Endpoint) @@ -323,7 +324,7 @@ func (router *Router) PostActivate() error { if !response.IsSuccess() { errObj := response.Error().(*genericResponse) - return errors.Errorf("failed to configure notification. reason %s", errObj.Message.Text) + return fmt.Errorf("failed to configure notification. reason %s", errObj.Message.Text) } logger.Info("successfully registered notification configuration on storagegrid") @@ -336,7 +337,7 @@ func (router *Router) PostInactivate() error { } // StartListening starts an event source -func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte) error) error { +func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error) error { log := logging.FromContext(ctx). 
With(logging.LabelEventSourceType, el.GetEventSourceType(), logging.LabelEventName, el.GetEventName()) log.Info("started processing the Storage Grid event source...") diff --git a/eventsources/sources/storagegrid/start_test.go b/eventsources/sources/storagegrid/start_test.go index 30969e8984..3649f3ea80 100644 --- a/eventsources/sources/storagegrid/start_test.go +++ b/eventsources/sources/storagegrid/start_test.go @@ -19,7 +19,7 @@ package storagegrid import ( "bytes" "encoding/json" - "io/ioutil" + "io" "net/http" "testing" @@ -102,7 +102,7 @@ func TestRouteActiveHandler(t *testing.T) { pbytes, err := yaml.Marshal(storageGridEventSource) convey.So(err, convey.ShouldBeNil) router.HandleRoute(writer, &http.Request{ - Body: ioutil.NopCloser(bytes.NewReader(pbytes)), + Body: io.NopCloser(bytes.NewReader(pbytes)), }) convey.So(writer.HeaderStatus, convey.ShouldEqual, http.StatusBadRequest) }) @@ -117,7 +117,7 @@ func TestRouteActiveHandler(t *testing.T) { }() router.HandleRoute(writer, &http.Request{ - Body: ioutil.NopCloser(bytes.NewReader([]byte(notification))), + Body: io.NopCloser(bytes.NewReader([]byte(notification))), }) convey.So(writer.HeaderStatus, convey.ShouldEqual, http.StatusOK) }) diff --git a/eventsources/sources/storagegrid/validate_test.go b/eventsources/sources/storagegrid/validate_test.go index 2a993edb80..b575d507e4 100644 --- a/eventsources/sources/storagegrid/validate_test.go +++ b/eventsources/sources/storagegrid/validate_test.go @@ -19,7 +19,7 @@ package storagegrid import ( "context" "fmt" - "io/ioutil" + "os" "testing" "github.com/argoproj/argo-events/eventsources/sources" @@ -35,7 +35,7 @@ func TestValidateEventSource(t *testing.T) { assert.Error(t, err) assert.Equal(t, "topic arn must be provided", err.Error()) - content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "storage-grid.yaml")) + content, err := os.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "storage-grid.yaml")) assert.Nil(t, err) var eventSource *v1alpha1.EventSource diff --git a/eventsources/sources/stripe/start.go b/eventsources/sources/stripe/start.go index 1faa7de41f..92648e969a 100644 --- a/eventsources/sources/stripe/start.go +++ b/eventsources/sources/stripe/start.go @@ -19,17 +19,18 @@ package stripe import ( "context" "encoding/json" - "io/ioutil" + "fmt" + "io" "net/http" "time" - "github.com/pkg/errors" "github.com/stripe/stripe-go" "github.com/stripe/stripe-go/webhookendpoint" "go.uber.org/zap" "github.com/argoproj/argo-events/common" "github.com/argoproj/argo-events/common/logging" + eventsourcecommon "github.com/argoproj/argo-events/eventsources/common" "github.com/argoproj/argo-events/eventsources/common/webhook" "github.com/argoproj/argo-events/eventsources/sources" apicommon "github.com/argoproj/argo-events/pkg/apis/common" @@ -94,9 +95,8 @@ func (rc *Router) HandleRoute(writer http.ResponseWriter, request *http.Request) route.Metrics.EventProcessingDuration(route.EventSourceName, route.EventName, float64(time.Since(start)/time.Millisecond)) }(time.Now()) - const MaxBodyBytes = int64(65536) - request.Body = http.MaxBytesReader(writer, request.Body, MaxBodyBytes) - payload, err := ioutil.ReadAll(request.Body) + request.Body = http.MaxBytesReader(writer, request.Body, route.Context.GetMaxPayloadSize()) + payload, err := io.ReadAll(request.Body) if err != nil { logger.Errorw("error reading request body", zap.Error(err)) writer.WriteHeader(http.StatusServiceUnavailable) @@ -151,7 +151,7 @@ func (rc *Router) PostActivate() error { apiKey, err := 
common.GetSecretFromVolume(stripeEventSource.APIKey) if err != nil { - return errors.Wrap(err, "APIKey not found") + return fmt.Errorf("APIKey not found, %w", err) } stripe.Key = apiKey @@ -190,7 +190,7 @@ func filterEvent(event *stripe.Event, filters []string) bool { } // StartListening starts an event source -func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte) error) error { +func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error) error { log := logging.FromContext(ctx). With(logging.LabelEventSourceType, el.GetEventSourceType(), logging.LabelEventName, el.GetEventName()) log.Info("started processing the Stripe event source...") diff --git a/eventsources/sources/stripe/validate.go b/eventsources/sources/stripe/validate.go index d5d5bb1c3b..827166ef97 100644 --- a/eventsources/sources/stripe/validate.go +++ b/eventsources/sources/stripe/validate.go @@ -18,8 +18,7 @@ package stripe import ( "context" - - "github.com/pkg/errors" + "fmt" "github.com/argoproj/argo-events/common" "github.com/argoproj/argo-events/eventsources/common/webhook" @@ -37,7 +36,7 @@ func validate(eventSource *v1alpha1.StripeEventSource) error { } if eventSource.CreateWebhook { if eventSource.APIKey == nil { - return errors.New("api key K8s secret selector not provided") + return fmt.Errorf("api key K8s secret selector not provided") } } return webhook.ValidateWebhookContext(eventSource.Webhook) diff --git a/eventsources/sources/stripe/validate_test.go b/eventsources/sources/stripe/validate_test.go index a3b9e8bf5e..12e8bc44a6 100644 --- a/eventsources/sources/stripe/validate_test.go +++ b/eventsources/sources/stripe/validate_test.go @@ -19,7 +19,7 @@ package stripe import ( "context" "fmt" - "io/ioutil" + "os" "testing" "github.com/ghodss/yaml" @@ -40,7 +40,7 @@ func TestValidateEventSource(t *testing.T) { assert.Error(t, err) assert.Equal(t, "api key K8s secret selector not provided", err.Error()) - content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "stripe.yaml")) + content, err := os.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "stripe.yaml")) assert.Nil(t, err) var eventSource *v1alpha1.EventSource diff --git a/eventsources/sources/webhook/start.go b/eventsources/sources/webhook/start.go index 90d49b600b..2be2ad90dc 100644 --- a/eventsources/sources/webhook/start.go +++ b/eventsources/sources/webhook/start.go @@ -17,21 +17,23 @@ limitations under the License. 
package webhook import ( + "bytes" "context" "encoding/json" - "io/ioutil" + "fmt" + "io" "net/http" "time" - "go.uber.org/zap" - "github.com/argoproj/argo-events/common" "github.com/argoproj/argo-events/common/logging" + eventsourcecommon "github.com/argoproj/argo-events/eventsources/common" "github.com/argoproj/argo-events/eventsources/common/webhook" metrics "github.com/argoproj/argo-events/metrics" apicommon "github.com/argoproj/argo-events/pkg/apis/common" "github.com/argoproj/argo-events/pkg/apis/events" "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" + "go.uber.org/zap" ) var ( @@ -46,7 +48,7 @@ func init() { type EventListener struct { EventSourceName string EventName string - WebhookContext v1alpha1.WebhookContext + Webhook v1alpha1.WebhookEventSource Metrics *metrics.Metrics } @@ -100,13 +102,19 @@ func (router *Router) HandleRoute(writer http.ResponseWriter, request *http.Requ return } + if route.Context.Method != request.Method { + logger.Info("http method does not match") + common.SendErrorResponse(writer, "http method does not match") + return + } + defer func(start time.Time) { route.Metrics.EventProcessingDuration(route.EventSourceName, route.EventName, float64(time.Since(start)/time.Millisecond)) }(time.Now()) - body, err := ioutil.ReadAll(request.Body) + body, err := GetBody(&writer, request, route, logger) if err != nil { - logger.Errorw("failed to parse request body", zap.Error(err)) + logger.Errorw("failed to get body", zap.Error(err)) common.SendErrorResponse(writer, err.Error()) route.Metrics.EventProcessingFailed(route.EventSourceName, route.EventName) return @@ -114,7 +122,7 @@ func (router *Router) HandleRoute(writer http.ResponseWriter, request *http.Requ payload := &events.WebhookEventData{ Header: request.Header, - Body: (*json.RawMessage)(&body), + Body: body, Metadata: route.Context.Metadata, } @@ -143,13 +151,65 @@ func (router *Router) PostInactivate() error { } // StartListening starts listening events -func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte) error) error { +func (el *EventListener) StartListening(ctx context.Context, dispatch func([]byte, ...eventsourcecommon.Option) error) error { log := logging.FromContext(ctx). 
		With(logging.LabelEventSourceType, el.GetEventSourceType(), logging.LabelEventName, el.GetEventName())
 	log.Info("started processing the webhook event source...")

-	route := webhook.NewRoute(&el.WebhookContext, log, el.GetEventSourceName(), el.GetEventName(), el.Metrics)
+	route := webhook.NewRoute(&el.Webhook.WebhookContext, log, el.GetEventSourceName(), el.GetEventName(), el.Metrics)

 	return webhook.ManageRoute(ctx, &Router{
 		route: route,
 	}, controller, dispatch)
 }
+
+func GetBody(writer *http.ResponseWriter, request *http.Request, route *webhook.Route, logger *zap.SugaredLogger) (*json.RawMessage, error) {
+	switch request.Method {
+	case http.MethodGet:
+		body, _ := json.Marshal(request.URL.Query())
+		ret := json.RawMessage(body)
+		return &ret, nil
+	case http.MethodPost:
+		contentType := ""
+		if len(request.Header["Content-Type"]) > 0 {
+			contentType = request.Header["Content-Type"][0]
+		}
+
+		switch contentType {
+		case "application/x-www-form-urlencoded":
+			if err := request.ParseForm(); err != nil {
+				logger.Errorw("failed to parse form data", zap.Error(err))
+				common.SendInternalErrorResponse(*writer, err.Error())
+				route.Metrics.EventProcessingFailed(route.EventSourceName, route.EventName)
+				return nil, err
+			}
+			body, _ := json.Marshal(request.PostForm)
+			ret := json.RawMessage(body)
+			return &ret, nil
+		// the default, including "application/json", is to parse the body as JSON
+		default:
+			request.Body = http.MaxBytesReader(*writer, request.Body, route.Context.GetMaxPayloadSize())
+			body, err := getRequestBody(request)
+			if err != nil {
+				logger.Errorw("failed to read request body", zap.Error(err))
+				common.SendErrorResponse(*writer, err.Error())
+				route.Metrics.EventProcessingFailed(route.EventSourceName, route.EventName)
+				return nil, err
+			}
+			ret := json.RawMessage(body)
+			return &ret, nil
+		}
+	default:
+		return nil, fmt.Errorf("unsupported method: %s", request.Method)
+	}
+}
+
+func getRequestBody(request *http.Request) ([]byte, error) {
+	// Read request payload
+	body, err := io.ReadAll(request.Body)
+	// Reset request.Body ReadCloser to prevent side-effect if re-read
+	request.Body = io.NopCloser(bytes.NewBuffer(body))
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse request body, %w", err)
+	}
+	return body, nil
+}
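Since GetBody feeds GET requests through json.Marshal(request.URL.Query()), the dispatched body is simply the query map rendered as JSON; a self-contained sketch (illustrative, not part of this patch) of that encoding using only the standard library:

	package main

	import (
		"encoding/json"
		"fmt"
		"net/url"
	)

	func main() {
		// url.Values is a map[string][]string, so a GET like
		// /fake?aaa=b%20b&ccc=d%20d marshals to the JSON object below.
		u, _ := url.Parse("http://example.com/fake?aaa=b%20b&ccc=d%20d")
		body, _ := json.Marshal(u.Query())
		fmt.Println(string(body)) // {"aaa":["b b"],"ccc":["d d"]}
	}

This is the same shape the tests in the following file assert against.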
parameter", func() { + url, _ := url.Parse("http://example.com/fake") + out := make(chan []byte) + router.route.Active = true + router.route.Context.Method = http.MethodGet + + go func() { + out <- <-router.route.DataCh + }() + + router.HandleRoute(writer, &http.Request{ + Method: http.MethodGet, + URL: url, + }) + result := <-out + convey.So(writer.HeaderStatus, convey.ShouldEqual, http.StatusOK) + convey.So(string(result), convey.ShouldContainSubstring, `"body":{}`) + }) + convey.Convey("Test POST method with form-urlencoded", func() { + payload := []byte(`aaa=b%20b&ccc=d%20d`) + + out := make(chan []byte) + router.route.Active = true + router.route.Context.Method = http.MethodPost + + go func() { + out <- <-router.route.DataCh + }() + + var buf bytes.Buffer + buf.Write(payload) + + headers := make(map[string][]string) + headers["Content-Type"] = []string{"application/x-www-form-urlencoded"} + + router.HandleRoute(writer, &http.Request{ + Method: http.MethodPost, + Header: headers, + Body: io.NopCloser(strings.NewReader(buf.String())), + }) + result := <-out + convey.So(writer.HeaderStatus, convey.ShouldEqual, http.StatusOK) + convey.So(string(result), convey.ShouldContainSubstring, `"body":{"aaa":["b b"],"ccc":["d d"]}`) + }) + convey.Convey("Test POST method with json", func() { + payload := []byte(`{"aaa":["b b"],"ccc":["d d"]}`) + + out := make(chan []byte) + router.route.Active = true + router.route.Context.Method = http.MethodPost + + go func() { + out <- <-router.route.DataCh + }() + + var buf bytes.Buffer + buf.Write(payload) + + headers := make(map[string][]string) + headers["Content-Type"] = []string{"application/json"} + + router.HandleRoute(writer, &http.Request{ + Method: http.MethodPost, + Header: headers, + Body: io.NopCloser(strings.NewReader(buf.String())), + }) + result := <-out + convey.So(writer.HeaderStatus, convey.ShouldEqual, http.StatusOK) + convey.So(string(result), convey.ShouldContainSubstring, `"body":{"aaa":["b b"],"ccc":["d d"]}`) + }) + }) +} diff --git a/eventsources/sources/webhook/validate.go b/eventsources/sources/webhook/validate.go index 3515007fc5..2569b71112 100644 --- a/eventsources/sources/webhook/validate.go +++ b/eventsources/sources/webhook/validate.go @@ -26,12 +26,12 @@ import ( // ValidateEventSource validates webhook event source func (listener *EventListener) ValidateEventSource(ctx context.Context) error { - return validate(&listener.WebhookContext) + return validate(&listener.Webhook) } -func validate(webhookEventSource *v1alpha1.WebhookContext) error { +func validate(webhookEventSource *v1alpha1.WebhookEventSource) error { if webhookEventSource == nil { return common.ErrNilEventSource } - return webhook.ValidateWebhookContext(webhookEventSource) + return webhook.ValidateWebhookContext(&webhookEventSource.WebhookContext) } diff --git a/eventsources/sources/webhook/validate_test.go b/eventsources/sources/webhook/validate_test.go index 3384f67948..590740683f 100644 --- a/eventsources/sources/webhook/validate_test.go +++ b/eventsources/sources/webhook/validate_test.go @@ -19,7 +19,7 @@ package webhook import ( "context" "fmt" - "io/ioutil" + "os" "testing" "github.com/argoproj/argo-events/eventsources/sources" @@ -30,13 +30,13 @@ import ( func TestValidateEventSource(t *testing.T) { listener := &EventListener{ - WebhookContext: v1alpha1.WebhookContext{}, + Webhook: v1alpha1.WebhookEventSource{}, } err := listener.ValidateEventSource(context.Background()) assert.Error(t, err) - content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", 
diff --git a/eventsources/sources/webhook/validate_test.go b/eventsources/sources/webhook/validate_test.go
index 3384f67948..590740683f 100644
--- a/eventsources/sources/webhook/validate_test.go
+++ b/eventsources/sources/webhook/validate_test.go
@@ -19,7 +19,7 @@ package webhook
 import (
 	"context"
 	"fmt"
-	"io/ioutil"
+	"os"
 	"testing"

 	"github.com/argoproj/argo-events/eventsources/sources"
@@ -30,13 +30,13 @@ import (

 func TestValidateEventSource(t *testing.T) {
 	listener := &EventListener{
-		WebhookContext: v1alpha1.WebhookContext{},
+		Webhook: v1alpha1.WebhookEventSource{},
 	}

 	err := listener.ValidateEventSource(context.Background())
 	assert.Error(t, err)

-	content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "webhook.yaml"))
+	content, err := os.ReadFile(fmt.Sprintf("%s/%s", sources.EventSourceDir, "webhook.yaml"))
 	assert.Nil(t, err)

 	var eventSource *v1alpha1.EventSource
@@ -46,7 +46,7 @@ func TestValidateEventSource(t *testing.T) {

 	for _, value := range eventSource.Spec.Webhook {
 		l := &EventListener{
-			WebhookContext: value,
+			Webhook: value,
 		}
 		err = l.ValidateEventSource(context.Background())
 		assert.NoError(t, err)
diff --git a/examples/event-sources/amqp.yaml b/examples/event-sources/amqp.yaml
index f3d475fbe3..5fbfab0fa1 100644
--- a/examples/event-sources/amqp.yaml
+++ b/examples/event-sources/amqp.yaml
@@ -28,26 +28,29 @@ spec:
         factor: 2
         jitter: 0.2
       # optional exchange settings
-      # if not povided, default values will be used
+      # if not provided, default values will be used
       exchangeDeclare:
         durable: true
         autoDelete: false
         internal: false
         noWait: false
       # optional queue settings
-      # if not povided, default values will be used
+      # if not provided, default values will be used
      queueDeclare:
        name: "my-queue-name"
        durable: false
        autoDelete: false
        exclusive: true
        noWait: false
+        # example argument to use quorum queues
+        # arguments: |-
+        #   x-queue-type: quorum
       # optional queue binding settings
-      # if not povided, default values will be used
+      # if not provided, default values will be used
       queueBind:
         noWait: false
       # optional consume settings
-      # if not povided, default values will be used
+      # if not provided, default values will be used
       consume:
         consumerTag: "my-consumer-tag"
         autoAck: true
diff --git a/examples/event-sources/azure-queue-storage.yaml b/examples/event-sources/azure-queue-storage.yaml
new file mode 100644
index 0000000000..0710c9b8f0
--- /dev/null
+++ b/examples/event-sources/azure-queue-storage.yaml
@@ -0,0 +1,33 @@
+apiVersion: argoproj.io/v1alpha1
+kind: EventSource
+metadata:
+  name: azure-queue-storage
+spec:
+  azureQueueStorage:
+    example:
+      # queueName is the name of the queue to listen to
+      queueName: test
+      # jsonBody specifies that all event body payload coming from this
+      # source will be JSON
+      jsonBody: true
+      # decodeMessage specifies if all the messages from AQS should be base64 decoded
+      decodeMessage: false
+      # waitTimeInSeconds defines the wait time between empty reads from the queue
+      waitTimeInSeconds: 2
+      # connectionString refers to the K8s secret that stores the connection string
+      connectionString:
+        # Key within the K8s secret whose corresponding value (must be base64 encoded) is the connection string
+        key: connectionstring
+        # Name of the K8s secret that contains the connection string
+        name: azure-secret
+
+#    example-with-managed-identity:
+#      # queueName is the name of the queue to listen to
+#      queueName: test
+#      # jsonBody specifies that all event body payload coming from this
+#      # source will be JSON
+#      jsonBody: true
+#      # storageAccountName defines the name of the storage account
+#      # it is mandatory to set it to connect via azure AD
+#      # this field is ignored if connectionString is set
+#      storageAccountName: "mystorageaccount"
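For completeness, the `azure-secret` referenced above could be created from a plain Kubernetes Secret manifest; an illustrative sketch (not part of this patch, the value is a placeholder):

    apiVersion: v1
    kind: Secret
    metadata:
      name: azure-secret
    stringData:
      connectionstring: "<azure-storage-connection-string>"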
diff --git a/examples/event-sources/azure-service-bus.yaml b/examples/event-sources/azure-service-bus.yaml
new file mode 100644
index 0000000000..375dfad1d3
--- /dev/null
+++ b/examples/event-sources/azure-service-bus.yaml
@@ -0,0 +1,28 @@
+apiVersion: argoproj.io/v1alpha1
+kind: EventSource
+metadata:
+  name: azure-service-bus
+spec:
+  azureServiceBus:
+    example:
+      # queueName is the name of the queue to listen to
+      queueName: test
+      # jsonBody specifies that all event body payload coming from this
+      # source will be JSON
+      jsonBody: true
+      # connectionString refers to the K8s secret that stores the connection string
+      connectionString:
+        # Key within the K8s secret whose corresponding value (must be base64 encoded) is the connection string
+        key: connectionstring
+        # Name of the K8s secret that contains the connection string
+        name: azure-secret
+
+#    example-managed-identity:
+#      # queueName is the name of the queue to listen to
+#      queueName: test
+#      # jsonBody specifies that all event body payload coming from this
+#      # source will be JSON
+#      jsonBody: true
+#      # fullyQualifiedNamespace is the Service Bus namespace name. This field is necessary to access via
+#      # Azure AD (managed identity) and it is ignored if ConnectionString is set
+#      fullyQualifiedNamespace: myservicebus.servicebus.windows.net
\ No newline at end of file
diff --git a/examples/event-sources/bitbucket.yaml b/examples/event-sources/bitbucket.yaml
new file mode 100644
index 0000000000..508dec6f48
--- /dev/null
+++ b/examples/event-sources/bitbucket.yaml
@@ -0,0 +1,104 @@
+apiVersion: argoproj.io/v1alpha1
+kind: EventSource
+metadata:
+  name: bitbucket
+spec:
+  service:
+    ports:
+      - port: 12000
+        targetPort: 12000
+  bitbucket:
+    # bitbucket eventsource example with basic auth strategy
+    example:
+      # Bitbucket repository list
+      repositories:
+        - owner: "name-of-owner" # owner of the repository
+          repositorySlug: "name-of-repository1" # repository slug
+        - owner: "name-of-owner"
+          repositorySlug: "name-of-repository2"
+      # events to listen to
+      # Visit https://support.atlassian.com/bitbucket-cloud/docs/manage-webhooks/
+      events:
+        - repo:push
+      # Bitbucket will send webhook events to the following port and endpoint
+      webhook:
+        # endpoint to listen to events on
+        endpoint: /push
+        # port to run internal HTTP server on
+        port: "12000"
+        # HTTP request method to allow. In this case, only POST requests are accepted
+        method: POST
+        # url the event-source will use to register in Bitbucket.
+        # This url must be reachable from outside the cluster.
+        # The name for the service is in `-eventsource-svc` format.
+        # You will need to create an Ingress or Openshift Route for the event-source service so that it can be reached from Bitbucket.
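+        # For illustration only (not part of this patch), a hypothetical Ingress exposing that service could look like:
+        #   apiVersion: networking.k8s.io/v1
+        #   kind: Ingress
+        #   metadata:
+        #     name: bitbucket-eventsource
+        #   spec:
+        #     rules:
+        #       - host: events.example.com
+        #         http:
+        #           paths:
+        #             - path: /push
+        #               pathType: Prefix
+        #               backend:
+        #                 service:
+        #                   name: bitbucket-eventsource-svc
+        #                   port:
+        #                     number: 12000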
+ url: http://url-that-is-reachable-from-bitbucket + # Delete the webhook when the eventsource is deleted + deleteHookOnFinish: true + auth: +# # oauthToken refers to K8s secret that stores the bitbucket OAuth bearer token +# # Visit https://support.atlassian.com/bitbucket-cloud/docs/use-oauth-on-bitbucket-cloud/ +# oauthToken: +# # Name of the K8s secret that contains the oauth token +# name: bitbucket-access +# # Key within the K8s secret whose corresponding value (must be base64 encoded) is oauth token +# key: token + + # basic refers to Basic Auth strategy and can be used with App passwords + # Visit https://support.atlassian.com/bitbucket-cloud/docs/app-passwords/ + basic: + # username refers to K8s secret that stores the bitbucket username + username: + # Name of the K8s secret that contains the username + name: bitbucket-access + # Key within the K8s secret whose corresponding value (must be base64 encoded) is username + key: username + # password refers to K8s secret that stores the bitbucket password (including App passwords) + password: + # Name of the K8s secret that contains the password + name: bitbucket-access + # Key within the K8s secret whose corresponding value (must be base64 encoded) is password + key: password + + +# # bitbucket eventsource example with deprecated syntax using basic auth strategy +# example-with-deprecated-repo-syntax: +# # owner of the repository +# owner: "name-of-repo-owner" +# # repository slug +# repositorySlug: "name-of-repository" +# # Bitbucket will send events to following port and endpoint +# webhook: +# # endpoint to listen to events on +# endpoint: /push +# # port to run internal HTTP server on +# port: "12000" +# # HTTP request method to allow. In this case, only POST requests are accepted +# method: POST +# # url the event-source will use to register in Bitbucket. +# # This url must be reachable from outside the cluster. +# # The name for the service is in `-eventsource-svc` format. +# # You will need to create an Ingress or Openshift Route for the event-source service so that it can be reached from Bitbucket. 
+# url: http://url-that-is-reachable-from-bitbucket +# # events to listen to +# # Visit https://support.atlassian.com/bitbucket-cloud/docs/manage-webhooks/ +# events: +# - repo:push +# auth: +# # basic refers to Basic Auth strategy and can be used with App passwords +# # Visit https://support.atlassian.com/bitbucket-cloud/docs/app-passwords/ +# basic: +# # username refers to K8s secret that stores the bitbucket username +# username: +# # Name of the K8s secret that contains the username +# name: bitbucket-access +# # Key within the K8s secret whose corresponding value (must be base64 encoded) is username +# key: username +# # password refers to K8s secret that stores the bitbucket password (including App passwords) +# password: +# # Name of the K8s secret that contains the password +# name: bitbucket-access +# # Key within the K8s secret whose corresponding value (must be base64 encoded) is password +# key: password +# # Delete the webhook when the eventsource is deleted +# deleteHookOnFinish: true diff --git a/examples/event-sources/bitbucketserver.yaml b/examples/event-sources/bitbucketserver.yaml new file mode 100644 index 0000000000..e8aaa8a6c5 --- /dev/null +++ b/examples/event-sources/bitbucketserver.yaml @@ -0,0 +1,88 @@ +apiVersion: argoproj.io/v1alpha1 +kind: EventSource +metadata: + name: bitbucketserver +spec: + service: + ports: + - port: 12000 + targetPort: 12000 + bitbucketserver: + example: + # Bitbucket Server repository list + repositories: + - projectKey: "PROJECTKEY1" # key of the project + repositorySlug: "name-of-repository1" # repository slug + - projectKey: "PROJECTKEY2" + repositorySlug: "name-of-repository2" + + # Bitbucket Server will send events to following port and endpoint + webhook: + # endpoint to listen to events on + endpoint: /push + # port to run internal HTTP server on + port: "12000" + # HTTP request method to allow. In this case, only POST requests are accepted + method: POST + # url the event-source will use to register in Bitbucket Server. + # This url must be reachable from outside the cluster. + # The name for the service is in `-eventsource-svc` format. + # You will need to create an Ingress or Openshift Route for the event-source service so that it can be reached from Bitbucket Server. + url: http://url-that-is-reachable-from-Bitbucket-Server + # events to listen to + # Visit https://confluence.atlassian.com/bitbucketserver/event-payload-938025882.html + events: + - repo:refs_changed + # accessToken refers to K8s secret that stores the bitbucket personal api token + accessToken: + # Name of the K8s secret that contains the access token + name: bitbucketserver-access + # Key within the K8s secret whose corresponding value (must be base64 encoded) is access token + key: token + webhookSecret: + # Name of the K8s secret that contains the webhook secret + name: bitbucketserver-access + # Key within the K8s secret whose corresponding value (must be base64 encoded) is the webhook secret + key: secret + # Delete the webhook when the eventsource is deleted + deleteHookOnFinish: true + # Bitbucket Base url to the REST API. This URL should include the "/rest" path. 
+      bitbucketserverBaseURL: https://my-bitbucket-server.com/rest
+
+#    example-with-deprecated-repo-syntax:
+#      # key of the project
+#      projectKey: "PROJECTKEY"
+#      # repository slug
+#      repositorySlug: "name-of-repository"
+#      # Bitbucket Server will send events to following port and endpoint
+#      webhook:
+#        # endpoint to listen to events on
+#        endpoint: /push
+#        # port to run internal HTTP server on
+#        port: "12000"
+#        # HTTP request method to allow. In this case, only POST requests are accepted
+#        method: POST
+#        # url the event-source will use to register in Bitbucket Server.
+#        # This url must be reachable from outside the cluster.
+#        # The name for the service is in `-eventsource-svc` format.
+#        # You will need to create an Ingress or Openshift Route for the event-source service so that it can be reached from Bitbucket Server.
+#        url: http://url-that-is-reachable-from-Bitbucket-Server
+#      # events to listen to
+#      # Visit https://confluence.atlassian.com/bitbucketserver/event-payload-938025882.html
+#      events:
+#        - repo:refs_changed
+#      # accessToken refers to K8s secret that stores the bitbucket personal api token
+#      accessToken:
+#        # Name of the K8s secret that contains the access token
+#        name: bitbucketserver-access
+#        # Key within the K8s secret whose corresponding value (must be base64 encoded) is access token
+#        key: token
+#      webhookSecret:
+#        # Name of the K8s secret that contains the webhook secret
+#        name: bitbucketserver-access
+#        # Key within the K8s secret whose corresponding value (must be base64 encoded) is the webhook secret
+#        key: secret
+#      # Delete the webhook when the eventsource is deleted
+#      deleteHookOnFinish: true
+#      # Bitbucket Base url to the REST API. This URL should include the "/rest" path.
+#      bitbucketserverBaseURL: https://my-bitbucket-server.com/rest
diff --git a/examples/event-sources/calendar.yaml b/examples/event-sources/calendar.yaml
index 220ec25325..a379eba8f5 100644
--- a/examples/event-sources/calendar.yaml
+++ b/examples/event-sources/calendar.yaml
@@ -26,18 +26,20 @@ spec:
 #
 #    schedule-with-static-user-payload:
 #      schedule: "30 * * * *"
-#      # userPayload is a static string that will be send to the sensor with each event payload
+#      # metadata contains key-value pairs that will be sent to the sensor with each event payload
 #      # whatever you put here is blindly delivered to sensor.
-#      # access in resourceParameters or templateParameters via the path userPayload.hello
-#      userPayload: {"hello": "world"}
+#      # access in resourceParameters or templateParameters via the path metadata.hello
+#      metadata:
+#        hello: world
 #
 #    schedule-in-specific-timezone:
 #      # creates an event every 20 seconds
 #      interval: "20s"
-#      # userPayload is a static string that will be send to the sensor with each event payload
+#      # metadata contains key-value pairs that will be sent to the sensor with each event payload
 #      # whatever you put here is blindly delivered to sensor.
-#      # access in resourceParameters or templateParameters via the path userPayload.hello
-#      userPayload: {"hello": "world"}
+#      # access in resourceParameters or templateParameters via the path metadata.hello
+#      metadata:
+#        hello: world
 #      # timezone
 #      # more info: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
 #      timezone: "America/New_York"
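Putting the change above together, a minimal complete EventSource using the new metadata field might look like this (illustrative sketch, not part of this patch; the name is hypothetical):

    apiVersion: argoproj.io/v1alpha1
    kind: EventSource
    metadata:
      name: calendar-example
    spec:
      calendar:
        example:
          interval: "20s"
          metadata:
            hello: world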
diff --git a/examples/event-sources/gcp-pubsub.yaml b/examples/event-sources/gcp-pubsub.yaml
index 3f9302309c..0a8020e4f5 100644
--- a/examples/event-sources/gcp-pubsub.yaml
+++ b/examples/event-sources/gcp-pubsub.yaml
@@ -44,6 +44,12 @@ spec:
 #    template:
 #      serviceAccountName: my-sa
+
+#    # (optional) you can add a PUBSUB_EMULATOR_HOST environment variable to the EventSource pod to connect
+#    # to a PubSub emulator (no credentials needed)
+#    container:
+#      env:
+#        - name: PUBSUB_EMULATOR_HOST
+#          value: pubsub-emulator:9030
 #  pubSub:
 #    example-workload-identity:
 #      # (optional) jsonBody specifies that all event body payload coming from this
diff --git a/examples/event-sources/gerrit.yaml b/examples/event-sources/gerrit.yaml
new file mode 100644
index 0000000000..3bf2ad7108
--- /dev/null
+++ b/examples/event-sources/gerrit.yaml
@@ -0,0 +1,52 @@
+apiVersion: argoproj.io/v1alpha1
+kind: EventSource
+metadata:
+  name: gerrit
+spec:
+  service:
+    ports:
+      - port: 13000
+        targetPort: 13000
+  gerrit:
+    example:
+      projects:
+        - "argo-webhook"
+        - "argo/webhook"
+      hookName: "argo-webhook-example"
+      # Gerrit will send events to following port and endpoint
+      webhook:
+        # endpoint to listen to events on
+        endpoint: /push
+        # port to run internal HTTP server on
+        port: "13000"
+        # HTTP request method to allow. In this case, only POST requests are accepted
+        method: POST
+        # url the event-source will use to register at Gerrit.
+        # This url must be reachable from outside the cluster.
+        # The name for the service is in `-eventsource-svc` format.
+        # You will need to create an Ingress or Openshift Route for the event-source service so that it can be reached from Gerrit.
+        url: http://url-that-is-reachable-from-Gerrit
+      # events to listen to
+      # Visit https://gerrit-review.googlesource.com/Documentation/cmd-stream-events.html#events
+      events:
+        - patchset-created
+      auth:
+        # username refers to K8s secret that stores the gerrit username
+        username:
+          # Name of the K8s secret that contains the username
+          name: gerrit-access
+          # Key within the K8s secret whose corresponding value (must be base64 encoded) is username
+          key: username
+        # password refers to K8s secret that stores the gerrit password
+        password:
+          # Name of the K8s secret that contains the password
+          name: gerrit-access
+          # Key within the K8s secret whose corresponding value (must be base64 encoded) is password
+          key: password
+      # Do SSL verification when triggering the hook
+      sslVerify: false
+      # Gerrit Base url.
+      # Change it to your private Gerrit instance url if you have one.
+      gerritBaseURL: http://10.0.71.100:58080/
+      deleteHookOnFinish: true
+#      githubApp:
+#        privateKey:
+#          name: github-app-pem
+#          key: privateKey.pem
+#        appID:
+#        installationID:
       # Github will send events to following port and endpoint
       webhook:
         # endpoint to listen to events on
diff --git a/examples/event-sources/gitlab.yaml b/examples/event-sources/gitlab.yaml
index 6beaca5a5f..55a32944b6 100644
--- a/examples/event-sources/gitlab.yaml
+++ b/examples/event-sources/gitlab.yaml
@@ -10,9 +10,11 @@ spec:
         targetPort: 12000
   gitlab:
     example:
-      # id of the project
-      projectID: "1"
-      # Github will send events to following port and endpoint
+      # Project namespace paths or IDs
+      projects:
+        - "whynowy/test"
+        - "3"
+      # GitLab will send events to following port and endpoint
       webhook:
         # endpoint to listen to events on
         endpoint: /push
@@ -36,11 +38,16 @@ spec:
           key: token
           # Name of the K8s secret that contains the access token
           name: gitlab-access
+      # secret token used by gitlab webhook
+      secretToken:
+        key: token
+        name: gitlab-secret
       # Do SSL verification when triggering the hook
       enableSSLVerification: false
       # Gitlab Base url.
       # Change it to your private GitLab instance url if you have one.
       gitlabBaseURL: https://gitlab.com
+      deleteHookOnFinish: true

 #    example-secure:
 #      projectId: "2"
diff --git a/examples/event-sources/mqtt.yaml b/examples/event-sources/mqtt.yaml
index 464f1e5b97..c806e981bf 100644
--- a/examples/event-sources/mqtt.yaml
+++ b/examples/event-sources/mqtt.yaml
@@ -7,7 +7,7 @@ spec:
     example:
       # mqtt broker url
       url: tcp://mqtt.argo-events:1883
-      # name of the popic
+      # name of the topic
       topic: bar
       # jsonBody specifies that all event body payload coming from this
       # source will be JSON
@@ -42,3 +42,17 @@ spec:
 #          name: my-secret
 #          key: client-key-key

+#    example-auth:
+#      url: "tcp://mqtt.argo-events:1883"
+#      topic: "bar"
+#      jsonBody: true
+#      clientId: "2345"
+#      # username and password for authentication
+#      # use secret selectors
+#      auth:
+#        username:
+#          name: my-secret
+#          key: username
+#        password:
+#          name: my-secret
+#          key: password
\ No newline at end of file
diff --git a/examples/event-sources/nats.yaml b/examples/event-sources/nats.yaml
index 50ce10ccd9..d1bc417c95 100644
--- a/examples/event-sources/nats.yaml
+++ b/examples/event-sources/nats.yaml
@@ -79,3 +79,12 @@ spec:
 #          name: my-secret
 #          key: my-credential

+#    example-queued-subscription:
+#      url: nats://nats.argo-events.svc:4222
+#      jsonBody: true
+#      subject: "foo"
+#      queue: "my-queue"
+#      auth:
+#        credential:
+#          name: my-secret
+#          key: my-credential
diff --git a/examples/event-sources/redis-streams.yaml b/examples/event-sources/redis-streams.yaml
new file mode 100644
index 0000000000..59cc06f55f
--- /dev/null
+++ b/examples/event-sources/redis-streams.yaml
@@ -0,0 +1,50 @@
+apiVersion: argoproj.io/v1alpha1
+kind: EventSource
+metadata:
+  name: redis-stream
+spec:
+  redisStream:
+    example:
+      # HostAddress refers to the address of the Redis host/server
+      hostAddress: redis.argo-events.svc:6379
+
+      # Password required for authentication.
+      # +optional
+      # password:
+      #   name: name_of_secret_that_holds_password
+      #   key: key_within_secret_which_holds_password_value
+
+      # DB to use. If not specified, default DB 0 will be used.
+      # +optional
+      db: 0
+
+      # MaxMsgCountPerRead holds the maximum number of messages per stream that will be read in each XREADGROUP of all streams
+      # Example: if there are 2 streams and MaxMsgCountPerRead=10, then each XREADGROUP may read up to a total of 20 messages.
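+      # (A further worked illustration: the commented maxMsgCountPerRead: 50 below would allow up to 100 messages across those 2 streams per read.)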
+      # Same as the COUNT option in XREADGROUP (https://redis.io/topics/streams-intro). Defaults to 10
+      # +optional
+      # maxMsgCountPerRead: 50
+
+      # ConsumerGroup refers to the Redis stream consumer group that will be created on all redis streams.
+      # Messages are read through this group. Defaults to 'argo-events-cg'
+      # +optional
+      # consumerGroup: argo-events-cg
+
+      # Streams to listen for events. XREADGROUP is used on all streams using a single consumer group.
+      streams:
+        - FOO
+
+#    example-tls:
+#      hostAddress: redis.argo-events.svc:6379
+#      db: 0
+#      streams:
+#        - FOO
+#      tls:
+#        caCertSecret:
+#          name: my-secret
+#          key: ca-cert-key
+#        clientCertSecret:
+#          name: my-secret
+#          key: client-cert-key
+#        clientKeySecret:
+#          name: my-secret
+#          key: client-key-key
\ No newline at end of file
diff --git a/examples/event-sources/sftp.yaml b/examples/event-sources/sftp.yaml
new file mode 100644
index 0000000000..7458acaeef
--- /dev/null
+++ b/examples/event-sources/sftp.yaml
@@ -0,0 +1,40 @@
+apiVersion: argoproj.io/v1alpha1
+kind: EventSource
+metadata:
+  name: sftp
+spec:
+  template:
+    container:
+      volumeMounts:
+        - mountPath: /test-data
+          name: test-data
+    volumes:
+      - name: test-data
+        emptyDir: {}
+  sftp:
+    example:
+      username:
+        key: username
+        name: username
+      password:
+        key: password
+        name: password
+      address:
+        key: address
+        name: address
+      watchPathConfig:
+        # directory to watch
+        directory: /test-data/
+        # path to watch
+        path: x.txt
+      pollIntervalDuration: 10s
+      # type of the event
+      # supported types are: CREATE, REMOVE
+      eventType: CREATE
+
+#    example-with-path-regex:
+#      watchPathConfig:
+#        directory: "/bin/"
+#        # the eventsource will watch events for path that matches following regex
+#        pathRegexp: "([a-z]+).txt"
+#      eventType: "CREATE"
diff --git a/examples/rbac/sensor-rbac.yaml b/examples/rbac/sensor-rbac.yaml
new file mode 100644
index 0000000000..be4109e9c0
--- /dev/null
+++ b/examples/rbac/sensor-rbac.yaml
@@ -0,0 +1,32 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: operate-workflow-sa
+---
+# Similarly you can use a ClusterRole and ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: operate-workflow-role
+rules:
+  - apiGroups:
+      - argoproj.io
+    verbs:
+      - "*"
+    resources:
+      - workflows
+      - workflowtemplates
+      - cronworkflows
+      - clusterworkflowtemplates
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: operate-workflow-role-binding
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: operate-workflow-role
+subjects:
+  - kind: ServiceAccount
+    name: operate-workflow-sa
diff --git a/examples/rbac/workflow-rbac.yaml b/examples/rbac/workflow-rbac.yaml
new file mode 100644
index 0000000000..05ef65855a
--- /dev/null
+++ b/examples/rbac/workflow-rbac.yaml
@@ -0,0 +1,29 @@
+# This file enables a Workflow Pod (running the Emissary executor) to read and patch WorkflowTaskResults,
+# which get shared with the Workflow Controller. The Controller uses the results to update Workflow status.
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  annotations:
+    workflows.argoproj.io/description: |
+      Recommended minimum permissions for the `emissary` executor.
+ name: executor +rules: +- apiGroups: + - argoproj.io + resources: + - workflowtaskresults + verbs: + - create + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: executor-default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: executor +subjects: +- kind: ServiceAccount + name: default diff --git a/examples/sensors/amqp.yaml b/examples/sensors/amqp.yaml index a468d30a8b..4eaf060508 100644 --- a/examples/sensors/amqp.yaml +++ b/examples/sensors/amqp.yaml @@ -13,9 +13,6 @@ spec: - template: name: amqp-workflow-trigger k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: resource: @@ -42,4 +39,5 @@ spec: parameters: - src: dependencyName: test-dep + dataKey: body dest: spec.arguments.parameters.0.value diff --git a/examples/sensors/aws-lambda-trigger.yaml b/examples/sensors/aws-lambda-trigger.yaml index 03f139a09f..eafa52227f 100644 --- a/examples/sensors/aws-lambda-trigger.yaml +++ b/examples/sensors/aws-lambda-trigger.yaml @@ -12,18 +12,27 @@ spec: name: lambda-trigger awsLambda: functionName: hello - accessKey: - name: aws-secret - key: accesskey - secretKey: - name: aws-secret - key: secretkey region: us-east-1 payload: - src: dependencyName: test-dep dataKey: body.name dest: name + # Optional, possible values: RequestResponse, Event and DryRun # Defaults to RequestResponse, which means invoke the function synchronously. invocationType: Event + + # Optional, use if ServiceAccount doesn't have IAM Role assigned. + # More information on IAM roles for service accounts: + # https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html + accessKey: + name: aws-secret + key: accesskey + secretKey: + name: aws-secret + key: secretkey + + # Optional, use if your IAM user/role should assume another role to + # perform this action + roleARN: arn:aws:iam::123456789012:role/some-role diff --git a/examples/sensors/aws-sns.yaml b/examples/sensors/aws-sns.yaml index 9b46c6d8e0..6ca1fba8bc 100644 --- a/examples/sensors/aws-sns.yaml +++ b/examples/sensors/aws-sns.yaml @@ -13,9 +13,6 @@ spec: - template: name: sns-workflow k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: resource: @@ -24,13 +21,37 @@ spec: metadata: generateName: aws-sns-worfklow- spec: - entrypoint: whalesay + entrypoint: main arguments: parameters: - name: message - # value will be overridden by the event payload - value: hello world templates: + - name: main + dag: + tasks: + - name: extract-message + template: extract-sns-message + arguments: + parameters: + - name: sns-message + value: "{{workflow.parameters.message}}" + - name: whalesay + dependencies: [extract-message] + template: whalesay + arguments: + parameters: + - name: message + value: "{{tasks.extract-message.outputs.result}}" + - name: extract-sns-message + inputs: + parameters: + - name: sns-message + script: + image: node:12-alpine + command: [node] + source: | + const snsMessage = {{inputs.parameters.sns-message}}; + console.log(snsMessage.Message); - name: whalesay inputs: parameters: @@ -42,4 +63,5 @@ spec: parameters: - src: dependencyName: test-dep + dataKey: body dest: spec.arguments.parameters.0.value diff --git a/examples/sensors/aws-sqs.yaml b/examples/sensors/aws-sqs.yaml index 4000550a7b..51dd487bde 100644 --- a/examples/sensors/aws-sqs.yaml +++ b/examples/sensors/aws-sqs.yaml @@ -13,9 +13,6 @@ spec: - template: name: sqs-workflow k8s: - group: argoproj.io - version: v1alpha1 - resource: 
workflows
           operation: create
           source:
             resource:
@@ -42,4 +39,5 @@ spec:
       parameters:
         - src:
             dependencyName: test-dep
+            dataKey: body
           dest: spec.arguments.parameters.0.value
diff --git a/examples/sensors/azure-events-hub.yaml b/examples/sensors/azure-events-hub.yaml
index e4c4633f88..01c8138aef 100644
--- a/examples/sensors/azure-events-hub.yaml
+++ b/examples/sensors/azure-events-hub.yaml
@@ -13,9 +13,6 @@ spec:
     - template:
         name: workflow-trigger
         k8s:
-          group: argoproj.io
-          version: v1alpha1
-          resource: workflows
           operation: create
           source:
             resource:
@@ -42,4 +39,5 @@ spec:
       parameters:
         - src:
             dependencyName: test-dep
+            dataKey: body.message
           dest: spec.arguments.parameters.0.value
diff --git a/examples/sensors/azure-queue-storage.yaml b/examples/sensors/azure-queue-storage.yaml
new file mode 100644
index 0000000000..2c0df2c424
--- /dev/null
+++ b/examples/sensors/azure-queue-storage.yaml
@@ -0,0 +1,43 @@
+apiVersion: argoproj.io/v1alpha1
+kind: Sensor
+metadata:
+  name: azure-queue-storage
+spec:
+  template:
+    serviceAccountName: operate-workflow-sa
+  dependencies:
+    - name: test-dep
+      eventSourceName: azure-queue-storage
+      eventName: example
+  triggers:
+    - template:
+        name: azure-queue-storage-workflow
+        k8s:
+          operation: create
+          source:
+            resource:
+              apiVersion: argoproj.io/v1alpha1
+              kind: Workflow
+              metadata:
+                generateName: azure-queue-storage-workflow-
+              spec:
+                entrypoint: whalesay
+                arguments:
+                  parameters:
+                    - name: message
+                      # the value will get overridden by the event payload
+                      value: hello world
+                templates:
+                  - name: whalesay
+                    inputs:
+                      parameters:
+                        - name: message
+                    container:
+                      image: docker/whalesay:latest
+                      command: [cowsay]
+                      args: ["{{inputs.parameters.message}}"]
+          parameters:
+            - src:
+                dependencyName: test-dep
+                dataKey: body
+              dest: spec.arguments.parameters.0.value
diff --git a/examples/sensors/azure-service-bus-sensor.yaml b/examples/sensors/azure-service-bus-sensor.yaml
new file mode 100644
index 0000000000..ee63472e14
--- /dev/null
+++ b/examples/sensors/azure-service-bus-sensor.yaml
@@ -0,0 +1,22 @@
+apiVersion: argoproj.io/v1alpha1
+kind: Sensor
+metadata:
+  name: azure-service-bus
+spec:
+  dependencies:
+    - name: test-dep
+      eventSourceName: webhook
+      eventName: example
+  triggers:
+    - template:
+        name: azure-service-bus-trigger
+        azureServiceBus:
+          queueName: queue
+          connectionString:
+            name: azure-service-bus
+            key: connectionString
+          payload:
+            - src:
+                dependencyName: test-dep
+                dataKey: body.message
+              dest: message
diff --git a/examples/sensors/azure-service-bus.yaml b/examples/sensors/azure-service-bus.yaml
new file mode 100644
index 0000000000..0b2f1ce4a2
--- /dev/null
+++ b/examples/sensors/azure-service-bus.yaml
@@ -0,0 +1,43 @@
+apiVersion: argoproj.io/v1alpha1
+kind: Sensor
+metadata:
+  name: azure-service-bus
+spec:
+  template:
+    serviceAccountName: operate-workflow-sa
+  dependencies:
+    - name: test-dep
+      eventSourceName: azure-service-bus
+      eventName: example
+  triggers:
+    - template:
+        name: service-bus-workflow
+        k8s:
+          operation: create
+          source:
+            resource:
+              apiVersion: argoproj.io/v1alpha1
+              kind: Workflow
+              metadata:
+                generateName: azure-service-bus-workflow-
+              spec:
+                entrypoint: whalesay
+                arguments:
+                  parameters:
+                    - name: message
+                      # the value will get overridden by the event payload
+                      value: hello world
+                templates:
+                  - name: whalesay
+                    inputs:
+                      parameters:
+                        - name: message
+                    container:
+                      image: docker/whalesay:latest
+                      command: [cowsay]
+                      args: ["{{inputs.parameters.message}}"]
+          parameters:
+            - src:
+                dependencyName: test-dep
+                dataKey: body
+
dest: spec.arguments.parameters.0.value diff --git a/examples/sensors/bitbucket.yaml b/examples/sensors/bitbucket.yaml new file mode 100644 index 0000000000..c365dfd4d2 --- /dev/null +++ b/examples/sensors/bitbucket.yaml @@ -0,0 +1,42 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: bitbucket +spec: + template: + serviceAccountName: operate-workflow-sa + dependencies: + - name: test-dep + eventSourceName: bitbucket + eventName: example + triggers: + - template: + name: bitbucket-workflow-trigger + k8s: + operation: create + source: + resource: + apiVersion: argoproj.io/v1alpha1 + kind: Workflow + metadata: + generateName: bitbucket-workflow- + spec: + entrypoint: whalesay + arguments: + parameters: + - name: message + # value will get overridden by the event payload + value: hello world + templates: + - name: whalesay + inputs: + parameters: + - name: message + container: + image: docker/whalesay:latest + command: [cowsay] + args: ["{{inputs.parameters.message}}"] + parameters: + - src: + dependencyName: test-dep + dest: spec.arguments.parameters.0.value diff --git a/examples/sensors/bitbucketserver.yaml b/examples/sensors/bitbucketserver.yaml new file mode 100644 index 0000000000..93b6175033 --- /dev/null +++ b/examples/sensors/bitbucketserver.yaml @@ -0,0 +1,42 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: bitbucketserver +spec: + template: + serviceAccountName: operate-workflow-sa + dependencies: + - name: test-dep + eventSourceName: bitbucketserver + eventName: example + triggers: + - template: + name: bitbucketserver-workflow-trigger + k8s: + operation: create + source: + resource: + apiVersion: argoproj.io/v1alpha1 + kind: Workflow + metadata: + generateName: bitbucketserver-workflow- + spec: + entrypoint: whalesay + arguments: + parameters: + - name: message + # value will get overridden by the event payload + value: hello world + templates: + - name: whalesay + inputs: + parameters: + - name: message + container: + image: docker/whalesay:latest + command: [cowsay] + args: ["{{inputs.parameters.message}}"] + parameters: + - src: + dependencyName: test-dep + dest: spec.arguments.parameters.0.value diff --git a/examples/sensors/calendar.yaml b/examples/sensors/calendar.yaml index 7d03d2993d..c433a0677b 100644 --- a/examples/sensors/calendar.yaml +++ b/examples/sensors/calendar.yaml @@ -13,9 +13,6 @@ spec: - template: name: calendar-workflow-trigger k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: resource: @@ -42,6 +39,7 @@ spec: parameters: - src: dependencyName: test-dep + dataKey: eventTime dest: spec.arguments.parameters.0.value retryStrategy: steps: 3 diff --git a/examples/sensors/complete-trigger-parameterization.yaml b/examples/sensors/complete-trigger-parameterization.yaml index 1ebfc73464..9bb6d9d990 100644 --- a/examples/sensors/complete-trigger-parameterization.yaml +++ b/examples/sensors/complete-trigger-parameterization.yaml @@ -31,9 +31,6 @@ spec: - template: name: THIS_WILL_BE_REPLACED_BY_NAME_FROM_EVENT k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: s3: diff --git a/examples/sensors/context-filter-webhook.yaml b/examples/sensors/context-filter-webhook.yaml index 1c9138129e..3c0960f7f1 100644 --- a/examples/sensors/context-filter-webhook.yaml +++ b/examples/sensors/context-filter-webhook.yaml @@ -20,9 +20,6 @@ spec: - template: name: done-workflow k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create 
source:
             s3:
diff --git a/examples/sensors/dependencies-conditions.yaml b/examples/sensors/dependencies-conditions.yaml
index fc24a22d06..3e55d3f0e1 100644
--- a/examples/sensors/dependencies-conditions.yaml
+++ b/examples/sensors/dependencies-conditions.yaml
@@ -18,11 +18,13 @@ spec:
     - template:
         # Boolean expression contains dependency names to determine whether to execute the trigger or not
         conditions: "test-dep"
+        conditionsReset:
+          - byTime:
+              # Reset conditions at 23:59 every day
+              cron: "59 23 * * *"
+              timezone: "America/Los_Angeles"
         name: webhook-workflow-trigger
         k8s:
-          group: argoproj.io
-          version: v1alpha1
-          resource: workflows
           operation: create
           source:
             resource:
@@ -45,9 +47,6 @@ spec:
         conditions: "test-dep-foo"
         name: webhook-workflow-trigger-2
         k8s:
-          group: argoproj.io
-          version: v1alpha1
-          resource: workflows
           operation: create
           source:
             resource:
diff --git a/examples/sensors/email-trigger.yaml b/examples/sensors/email-trigger.yaml
new file mode 100644
index 0000000000..32fca3f19b
--- /dev/null
+++ b/examples/sensors/email-trigger.yaml
@@ -0,0 +1,33 @@
+apiVersion: argoproj.io/v1alpha1
+kind: Sensor
+metadata:
+  name: webhook
+spec:
+  dependencies:
+    - name: test-dep
+      eventSourceName: webhook
+      eventName: example
+  triggers:
+    - parameters:
+        - src:
+            dependencyName: test-dep
+            dataKey: body.to
+          dest: email.to.-1
+        - src:
+            dependencyName: test-dep
+            dataTemplate: "Hi {{.Input.body.name}},\n\n\tHello There.\n\nThanks,\nObi"
+          dest: email.body
+      template:
+        name: email-trigger
+        email:
+          username: username
+          smtpPassword:
+            key: password
+            name: smtp-secret
+          # to:
+          #   - target1@email.com
+          #   - target2@email.com
+          host: smtp.example.net
+          port: 587
+          from: example@email.com
+          subject: Hello There
diff --git a/examples/sensors/emitter.yaml b/examples/sensors/emitter.yaml
index 9d5bfbda57..37636bacad 100644
--- a/examples/sensors/emitter.yaml
+++ b/examples/sensors/emitter.yaml
@@ -13,9 +13,6 @@ spec:
     - template:
         name: workflow-trigger
         k8s:
-          group: argoproj.io
-          version: v1alpha1
-          resource: workflows
           operation: create
           source:
             resource:
@@ -42,4 +39,5 @@ spec:
       parameters:
         - src:
             dependencyName: test-dep
+            dataKey: body
           dest: spec.arguments.parameters.0.value
diff --git a/examples/sensors/file.yaml b/examples/sensors/file.yaml
index 97e3734ad0..dedeea242b 100644
--- a/examples/sensors/file.yaml
+++ b/examples/sensors/file.yaml
@@ -13,9 +13,6 @@ spec:
     - template:
         name: file-workflow-trigger
         k8s:
-          group: argoproj.io
-          version: v1alpha1
-          resource: workflows
           operation: create
           source:
             resource:
@@ -37,6 +34,7 @@ spec:
       parameters:
         - src:
             dependencyName: test-dep
+            dataKey: name
           dest: spec.templates.0.container.args.0
       retryStrategy:
         steps: 3
diff --git a/examples/sensors/filter-script.yaml b/examples/sensors/filter-script.yaml
new file mode 100644
index 0000000000..c64c220bc5
--- /dev/null
+++ b/examples/sensors/filter-script.yaml
@@ -0,0 +1,55 @@
+# Event Payload
+#
+# {
+#   "a": "b",
+#   "c": 10,
+#   "d": {
+#     "e": "z"
+#   }
+# }
+#
+
+apiVersion: argoproj.io/v1alpha1
+kind: Sensor
+metadata:
+  name: with-script-filter
+spec:
+  dependencies:
+    - name: test-dep
+      eventSourceName: webhook
+      eventName: example
+      filters:
+        script: |-
+          if event.body.a == "b" and event.body.d.e == "z" then return true else return false end
+  triggers:
+    - template:
+        name: workflow
+        k8s:
+          operation: create
+          source:
+            resource:
+              apiVersion: argoproj.io/v1alpha1
+              kind: Workflow
+              metadata:
+                generateName: workflow-
+              spec:
+                entrypoint: whalesay
+                arguments:
+                  parameters:
+                    - name: message
+                      # value will
get overridden by the event payload + value: hello world + templates: + - name: whalesay + inputs: + parameters: + - name: message + container: + image: docker/whalesay:latest + command: [cowsay] + args: ["{{inputs.parameters.message}}"] + parameters: + - src: + dependencyName: test-dep + dataKey: name + dest: spec.arguments.parameters.0.value diff --git a/examples/tutorials/07-filters/sensor-context-filter.yaml b/examples/sensors/filter-with-context.yaml similarity index 90% rename from examples/tutorials/07-filters/sensor-context-filter.yaml rename to examples/sensors/filter-with-context.yaml index d125f74947..a17d57e992 100644 --- a/examples/tutorials/07-filters/sensor-context-filter.yaml +++ b/examples/sensors/filter-with-context.yaml @@ -1,7 +1,7 @@ apiVersion: argoproj.io/v1alpha1 kind: Sensor metadata: - name: context-filter + name: with-ctx-filter spec: template: serviceAccountName: operate-workflow-sa @@ -11,14 +11,11 @@ spec: eventName: example filters: context: - source: webhook + source: custom-webhook triggers: - template: name: done-workflow k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: resource: diff --git a/examples/sensors/filter-with-data-advanced.yaml b/examples/sensors/filter-with-data-advanced.yaml new file mode 100644 index 0000000000..52899b8ca8 --- /dev/null +++ b/examples/sensors/filter-with-data-advanced.yaml @@ -0,0 +1,68 @@ +# Event Payload +# +# { +# "a": true, +# "b": { +# "c": 3.14, +# "d": "hello, world" +# } +# } +# + +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: with-advanced-data-filters +spec: + dependencies: + - name: test-dep + eventSourceName: webhook + eventName: example + filters: + # If event payload passes ALL following data filters, the event is considered a valid event. 
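+        # (Within a single data filter, the listed values act as an OR; the b.d filter below passes because the payload matches its second value.)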
+ data: # result: EVENT PASS + - path: "a" # true + type: "bool" + value: + - "true" + - path: "b.c" # true + type: "number" + value: + - "3.14" + - path: "b.d" # true + type: "string" + value: + - "hello,world" + - "hello, world" + triggers: + - template: + name: workflow + k8s: + operation: create + source: + resource: + apiVersion: argoproj.io/v1alpha1 + kind: Workflow + metadata: + generateName: workflow- + spec: + entrypoint: whalesay + arguments: + parameters: + - name: message + # value will get overridden by the event payload + value: hello world + templates: + - name: whalesay + inputs: + parameters: + - name: message + container: + image: docker/whalesay:latest + command: [cowsay] + args: ["{{inputs.parameters.message}}"] + parameters: + - src: + dependencyName: test-dep + dataKey: name + dest: spec.arguments.parameters.0.value diff --git a/examples/tutorials/07-filters/sensor-data-filters.yaml b/examples/sensors/filter-with-data-simple-1.yaml similarity index 93% rename from examples/tutorials/07-filters/sensor-data-filters.yaml rename to examples/sensors/filter-with-data-simple-1.yaml index 335d63c5b6..fe842ca5b8 100644 --- a/examples/tutorials/07-filters/sensor-data-filters.yaml +++ b/examples/sensors/filter-with-data-simple-1.yaml @@ -1,7 +1,7 @@ apiVersion: argoproj.io/v1alpha1 kind: Sensor metadata: - name: data-filter + name: with-simple-data-filter-1 spec: template: serviceAccountName: operate-workflow-sa @@ -20,9 +20,6 @@ spec: - template: name: data-workflow k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: resource: diff --git a/examples/sensors/data-filter-webhook.yaml b/examples/sensors/filter-with-data-simple-2.yaml similarity index 93% rename from examples/sensors/data-filter-webhook.yaml rename to examples/sensors/filter-with-data-simple-2.yaml index a689a442a4..fd953fc88e 100644 --- a/examples/sensors/data-filter-webhook.yaml +++ b/examples/sensors/filter-with-data-simple-2.yaml @@ -1,7 +1,7 @@ apiVersion: argoproj.io/v1alpha1 kind: Sensor metadata: - name: webhook + name: with-simple-data-filter-2 spec: template: serviceAccountName: operate-workflow-sa @@ -20,9 +20,6 @@ spec: - template: name: data-workflow k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: resource: @@ -49,4 +46,5 @@ spec: parameters: - src: dependencyName: test-dep + dataKey: body dest: spec.arguments.parameters.0.value diff --git a/examples/sensors/data-filter-value-webhook.yaml b/examples/sensors/filter-with-data-simple-3.yaml similarity index 92% rename from examples/sensors/data-filter-value-webhook.yaml rename to examples/sensors/filter-with-data-simple-3.yaml index 83ca63b03e..93295ebaed 100644 --- a/examples/sensors/data-filter-value-webhook.yaml +++ b/examples/sensors/filter-with-data-simple-3.yaml @@ -1,7 +1,7 @@ apiVersion: argoproj.io/v1alpha1 kind: Sensor metadata: - name: webhook + name: with-simple-data-filter-3 spec: template: serviceAccountName: operate-workflow-sa @@ -19,9 +19,6 @@ spec: - template: name: data-workflow k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: resource: diff --git a/examples/sensors/data-filter-comparator-webhook.yaml b/examples/sensors/filter-with-data-simple-4.yaml similarity index 93% rename from examples/sensors/data-filter-comparator-webhook.yaml rename to examples/sensors/filter-with-data-simple-4.yaml index 22efdcd8a6..12076d4444 100644 --- a/examples/sensors/data-filter-comparator-webhook.yaml +++ 
b/examples/sensors/filter-with-data-simple-4.yaml @@ -1,7 +1,7 @@ apiVersion: argoproj.io/v1alpha1 kind: Sensor metadata: - name: webhook + name: with-simple-data-filter-4 spec: template: serviceAccountName: operate-workflow-sa @@ -20,9 +20,6 @@ spec: - template: name: data-workflow k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: resource: diff --git a/examples/sensors/filter-with-expressions.yaml b/examples/sensors/filter-with-expressions.yaml index 03f50994fd..426262f769 100644 --- a/examples/sensors/filter-with-expressions.yaml +++ b/examples/sensors/filter-with-expressions.yaml @@ -2,9 +2,13 @@ # # { # "a": "b", -# "c": 10, -# "d": { -# "e": false +# "a-longer-name": 10, +# "nested": { +# "path": { +# "can-get": { +# "longer": false +# } +# } # } # } # @@ -19,32 +23,31 @@ spec: eventSourceName: webhook eventName: example filters: - # If event payload passes either one of the following filters, the event is considered a valid event. - exprs: - - expr: a == "b" || c == 10 + # If event payload passes ALL following expr filters, the event is considered a valid event. + exprs: # result: EVENT PASS + - expr: a == "b" || c == 10 # true + # In 'fields', 'name' works as a small alias used inside 'expr' above, + # while 'path' refers to a potentially-long JSON path in the payload. fields: - name: a path: a - name: c - path: c - - expr: e == false + path: a-longer-name + - expr: e == false # true fields: - name: e - path: d.e + path: nested.path.can-get.longer triggers: - template: name: workflow k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: resource: apiVersion: argoproj.io/v1alpha1 kind: Workflow metadata: - generateName: workflow- + generateName: expr-workflow- spec: entrypoint: whalesay arguments: @@ -64,4 +67,5 @@ spec: parameters: - src: dependencyName: test-dep + dataKey: name dest: spec.arguments.parameters.0.value diff --git a/examples/sensors/filter-with-multiple-data-filters-in-or.yaml b/examples/sensors/filter-with-multiple-data-filters-in-or.yaml new file mode 100644 index 0000000000..dcfe6d82aa --- /dev/null +++ b/examples/sensors/filter-with-multiple-data-filters-in-or.yaml @@ -0,0 +1,69 @@ +# Event Payload +# +# { +# "a": true, +# "b": { +# "c": 3.14, +# "d": "hello, world" +# } +# } +# + +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: with-multiple-data-filters-in-or +spec: + dependencies: + - name: test-dep + eventSourceName: webhook + eventName: example + filters: + # If event payload passes either one of the following filters, the event is considered a valid event. 
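+        # (A hypothetical variation: with the default dataLogicalOperator of "and", this same payload would be rejected, because the b.d filter below is false.)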
+        dataLogicalOperator: "or"
+        data:
+          - path: "a" # true
+            type: "bool"
+            value:
+              - "true"
+          - path: "b.c" # true
+            type: "number"
+            value:
+              - "3.14"
+          - path: "b.d" # FALSE
+            type: "string"
+            value:
+              - "hello there"
+        # result: EVENT PASS
+  triggers:
+    - template:
+        name: workflow
+        k8s:
+          operation: create
+          source:
+            resource:
+              apiVersion: argoproj.io/v1alpha1
+              kind: Workflow
+              metadata:
+                generateName: workflow-
+              spec:
+                entrypoint: whalesay
+                arguments:
+                  parameters:
+                    - name: message
+                      # value will get overridden by the event payload
+                      value: hello world
+                templates:
+                  - name: whalesay
+                    inputs:
+                      parameters:
+                        - name: message
+                    container:
+                      image: docker/whalesay:latest
+                      command: [cowsay]
+                      args: ["{{inputs.parameters.message}}"]
+          parameters:
+            - src:
+                dependencyName: test-dep
+                dataKey: name
+              dest: spec.arguments.parameters.0.value
diff --git a/examples/sensors/filter-with-multiple-expr-filters-in-or.yaml b/examples/sensors/filter-with-multiple-expr-filters-in-or.yaml
new file mode 100644
index 0000000000..31496cf44a
--- /dev/null
+++ b/examples/sensors/filter-with-multiple-expr-filters-in-or.yaml
@@ -0,0 +1,73 @@
+# Event Payload
+#
+# {
+#   "a": "b",
+#   "a-longer-name": 10,
+#   "nested": {
+#     "path": {
+#       "can-get": {
+#         "longer": false
+#       }
+#     }
+#   }
+# }
+#
+
+apiVersion: argoproj.io/v1alpha1
+kind: Sensor
+metadata:
+  name: with-multiple-expr-filters-in-or
+spec:
+  dependencies:
+    - name: test-dep
+      eventSourceName: webhook
+      eventName: example
+      filters:
+        # If event payload passes either one of the following filters, the event is considered a valid event.
+        exprLogicalOperator: "or"
+        exprs:
+          - expr: a == "b" && c != 10 # FALSE
+            # In 'fields', 'name' works as a small alias used inside 'expr' above,
+            # while 'path' refers to a potentially-long JSON path in the payload.
+            fields:
+              - name: a
+                path: a
+              - name: c
+                path: a-longer-name
+          - expr: e == false # true
+            fields:
+              - name: e
+                path: nested.path.can-get.longer
+        # result: EVENT PASS
+  triggers:
+    - template:
+        name: workflow
+        k8s:
+          operation: create
+          source:
+            resource:
+              apiVersion: argoproj.io/v1alpha1
+              kind: Workflow
+              metadata:
+                generateName: workflow-
+              spec:
+                entrypoint: whalesay
+                arguments:
+                  parameters:
+                    - name: message
+                      # value will get overridden by the event payload
+                      value: hello world
+                templates:
+                  - name: whalesay
+                    inputs:
+                      parameters:
+                        - name: message
+                    container:
+                      image: docker/whalesay:latest
+                      command: [cowsay]
+                      args: ["{{inputs.parameters.message}}"]
+          parameters:
+            - src:
+                dependencyName: test-dep
+                dataKey: name
+              dest: spec.arguments.parameters.0.value
diff --git a/examples/sensors/filter-with-multiple-filters-in-or.yaml b/examples/sensors/filter-with-multiple-filters-in-or.yaml
new file mode 100644
index 0000000000..afe701fbe6
--- /dev/null
+++ b/examples/sensors/filter-with-multiple-filters-in-or.yaml
@@ -0,0 +1,83 @@
+# Event Payload
+#
+# {
+#   "a": "b",
+#   "c": 10,
+#   "d": {
+#     "e": false
+#   }
+# }
+#
+
+apiVersion: argoproj.io/v1alpha1
+kind: Sensor
+metadata:
+  name: with-multiple-filters-in-or
+spec:
+  dependencies:
+    - name: test-dep
+      eventSourceName: webhook
+      eventName: example
+      # If event payload passes either one of the following filters, the event is considered a valid event.
+      filtersLogicalOperator: "or"
+      filters: # global filters result: EVENT PASS
+        # If event payload passes either one of the following filters, the event is considered a valid event.
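+        # (filtersLogicalOperator above combines the expr group and the data group as wholes; exprLogicalOperator and dataLogicalOperator below combine the filters within each group.)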
+ exprLogicalOperator: "or" + exprs: # expr filter result: EVENT PASS + - expr: a == "b" || c == 10 # true + fields: + - name: a + path: a + - name: c + path: c + - expr: e == false # true + fields: + - name: e + path: d.e + dataLogicalOperator: "or" + data: # data filter result: EVENT PASS + - path: "a" # true + type: "string" + value: + - "b" + - path: "c" # FALSE + type: "number" + comparator: ">" + value: + - "20" + - path: "d.e" # true + type: "bool" + value: + - "false" + triggers: + - template: + name: workflow + k8s: + operation: create + source: + resource: + apiVersion: argoproj.io/v1alpha1 + kind: Workflow + metadata: + generateName: workflow- + spec: + entrypoint: whalesay + arguments: + parameters: + - name: message + # value will get overridden by the event payload + value: hello world + templates: + - name: whalesay + inputs: + parameters: + - name: message + container: + image: docker/whalesay:latest + command: [cowsay] + args: ["{{inputs.parameters.message}}"] + parameters: + - src: + dependencyName: test-dep + dataKey: name + dest: spec.arguments.parameters.0.value diff --git a/examples/sensors/filter-with-multiple-filters-mixed.yaml b/examples/sensors/filter-with-multiple-filters-mixed.yaml new file mode 100644 index 0000000000..519dc1ad50 --- /dev/null +++ b/examples/sensors/filter-with-multiple-filters-mixed.yaml @@ -0,0 +1,82 @@ +# Event Payload +# +# { +# "a": "b", +# "c": 10, +# "d": { +# "e": false +# } +# } +# + +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: with-mixed-multiple-filters +spec: + dependencies: + - name: test-dep + eventSourceName: webhook + eventName: example + # If event payload passes either one of the following filters, the event is considered a valid event. + filtersLogicalOperator: "or" + filters: # global filters result: EVENT PASS + # default "exprLogicalOperator" = "and" + exprs: # expr filter result: EVENT PASS + - expr: a == "b" || c == 10 # true + fields: + - name: a + path: a + - name: c + path: c + - expr: e == false # true + fields: + - name: e + path: d.e + # default "dataLogicalOperator" = "and" + data: # data filter result: EVENT DOES NOT PASS + - path: "a" # true + type: "string" + value: + - "b" + - path: "c" # FALSE + type: "number" + comparator: ">" + value: + - "20" + - path: "d.e" # true + type: "bool" + value: + - "false" + triggers: + - template: + name: workflow + k8s: + operation: create + source: + resource: + apiVersion: argoproj.io/v1alpha1 + kind: Workflow + metadata: + generateName: workflow- + spec: + entrypoint: whalesay + arguments: + parameters: + - name: message + # value will get overridden by the event payload + value: hello world + templates: + - name: whalesay + inputs: + parameters: + - name: message + container: + image: docker/whalesay:latest + command: [cowsay] + args: ["{{inputs.parameters.message}}"] + parameters: + - src: + dependencyName: test-dep + dataKey: name + dest: spec.arguments.parameters.0.value diff --git a/examples/sensors/filter-with-multiple-filters.yaml b/examples/sensors/filter-with-multiple-filters.yaml new file mode 100644 index 0000000000..019c66baab --- /dev/null +++ b/examples/sensors/filter-with-multiple-filters.yaml @@ -0,0 +1,81 @@ +# Event Payload +# +# { +# "a": "b", +# "c": 10, +# "d": { +# "e": false +# } +# } +# + +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: with-multiple-filters +spec: + dependencies: + - name: test-dep + eventSourceName: webhook + eventName: example + # If event payload passes ALL following filters, the event is 
considered a valid event. + # default "filtersLogicalOperator" = "and" + filters: # global filters result: EVENT PASS + # default "exprLogicalOperator" = "and" + exprs: # expr filter result: EVENT PASS + - expr: a == "b" || c == 10 # true + fields: + - name: a + path: a + - name: c + path: c + - expr: e == false # true + fields: + - name: e + path: d.e + # default "dataLogicalOperator" = "and" + data: # data filter result: EVENT PASS + - path: "a" # true + type: "string" + value: + - "b" + - path: "c" # true + type: "number" + value: + - "10" + - path: "d.e" # true + type: "bool" + value: + - "false" + triggers: + - template: + name: workflow + k8s: + operation: create + source: + resource: + apiVersion: argoproj.io/v1alpha1 + kind: Workflow + metadata: + generateName: workflow- + spec: + entrypoint: whalesay + arguments: + parameters: + - name: message + # value will get overridden by the event payload + value: hello world + templates: + - name: whalesay + inputs: + parameters: + - name: message + container: + image: docker/whalesay:latest + command: [cowsay] + args: ["{{inputs.parameters.message}}"] + parameters: + - src: + dependencyName: test-dep + dataKey: name + dest: spec.arguments.parameters.0.value diff --git a/examples/sensors/filter-with-time.yaml b/examples/sensors/filter-with-time.yaml new file mode 100644 index 0000000000..851583f359 --- /dev/null +++ b/examples/sensors/filter-with-time.yaml @@ -0,0 +1,47 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: with-time-filter +spec: + template: + serviceAccountName: operate-workflow-sa + dependencies: + - name: test-dep + eventSourceName: webhook + eventName: example + filters: + time: + start: "02:30:00" + stop: "04:30:00" + triggers: + - template: + name: done-workflow + k8s: + operation: create + source: + resource: + apiVersion: argoproj.io/v1alpha1 + kind: Workflow + metadata: + generateName: data-workflow- + spec: + entrypoint: whalesay + arguments: + parameters: + - name: message + # value will get overridden by the event payload + value: hello world + templates: + - name: whalesay + inputs: + parameters: + - name: message + container: + image: docker/whalesay:latest + command: [cowsay] + args: ["{{inputs.parameters.message}}"] + parameters: + - src: + dependencyName: test-dep + dataKey: body.message + dest: spec.arguments.parameters.0.value diff --git a/examples/sensors/gcp-pubsub.yaml b/examples/sensors/gcp-pubsub.yaml index d385fc9f9f..ebeab72bb2 100644 --- a/examples/sensors/gcp-pubsub.yaml +++ b/examples/sensors/gcp-pubsub.yaml @@ -13,9 +13,6 @@ spec: - template: name: gcp-workflow k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: resource: @@ -42,4 +39,5 @@ spec: parameters: - src: dependencyName: test-dep + dataKey: body dest: spec.arguments.parameters.0.value diff --git a/examples/sensors/github.yaml b/examples/sensors/github.yaml index 174f54a7d4..0575cd644e 100644 --- a/examples/sensors/github.yaml +++ b/examples/sensors/github.yaml @@ -11,9 +11,9 @@ spec: eventName: example filters: data: - # Name of the event that triggered the delivery: [pull_request, push, yadayadayada] + # Type of Github event that triggered the delivery: [pull_request, push, issues, label, ...] 
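+          # (matched against the webhook delivery's HTTP headers rather than the body, hence the headers.X-Github-Event path below)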
# https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads - - path: body.X-GitHub-Event + - path: headers.X-Github-Event type: string value: - pull_request @@ -36,16 +36,13 @@ spec: - template: name: github-workflow-trigger k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: resource: apiVersion: argoproj.io/v1alpha1 kind: Workflow metadata: - name: github- + generateName: github- spec: entrypoint: whalesay arguments: @@ -77,7 +74,7 @@ spec: dependencyName: test-dep dataTemplate: "{{ .Input.body.pull_request.head.sha | substr 0 7 }}" dest: spec.arguments.parameters.2.value - # Append pull request number and short sha to dynamically assign worklfow name + # Append pull request number and short sha to dynamically assign workflow name - src: dependencyName: test-dep dataTemplate: "{{ .Input.body.pull_request.number }}-{{ .Input.body.pull_request.head.sha | substr 0 7 }}" diff --git a/examples/sensors/gitlab.yaml b/examples/sensors/gitlab.yaml index fe109de156..af31aae6b2 100644 --- a/examples/sensors/gitlab.yaml +++ b/examples/sensors/gitlab.yaml @@ -13,9 +13,6 @@ spec: - template: name: gitlab-workflow-trigger k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: resource: diff --git a/examples/sensors/hdfs.yaml b/examples/sensors/hdfs.yaml index 3cb3824c41..57fd3ce06a 100644 --- a/examples/sensors/hdfs.yaml +++ b/examples/sensors/hdfs.yaml @@ -13,9 +13,6 @@ spec: - template: name: hdfs-workflow-trigger k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: resource: @@ -36,4 +33,5 @@ spec: parameters: - src: dependencyName: test-dep + dataKey: body dest: spec.templates.0.container.args.1 diff --git a/examples/sensors/kafka-ttl.yaml b/examples/sensors/kafka-ttl.yaml new file mode 100644 index 0000000000..f3db777b4f --- /dev/null +++ b/examples/sensors/kafka-ttl.yaml @@ -0,0 +1,47 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: kafka +spec: + template: + serviceAccountName: operate-workflow-sa + dependencies: + - name: test-dep + eventSourceName: kafka + eventName: example + triggers: + - template: + name: kafka-workflow-trigger + k8s: + operation: create + source: + resource: + apiVersion: argoproj.io/v1alpha1 + kind: Workflow + metadata: + generateName: kafka-workflow-with-ttl- + spec: + ttlStrategy: + secondsAfterCompletion: 10 # Time to live after workflow is completed, replaces ttlSecondsAfterFinished + secondsAfterSuccess: 5 # Time to live after workflow is successful + secondsAfterFailure: 5 # Time to live after workflow fails + entrypoint: whalesay + arguments: + parameters: + - name: message + # this is the value that should be overridden + value: hello world + templates: + - name: whalesay + inputs: + parameters: + - name: message + container: + image: docker/whalesay:latest + command: [cowsay] + args: ["{{inputs.parameters.message}}"] + parameters: + - src: + dependencyName: test-dep + dataKey: body + dest: spec.arguments.parameters.0.value diff --git a/examples/sensors/kafka.yaml b/examples/sensors/kafka.yaml index 3303a488ba..20fc4374fe 100644 --- a/examples/sensors/kafka.yaml +++ b/examples/sensors/kafka.yaml @@ -13,9 +13,6 @@ spec: - template: name: kafka-workflow-trigger k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: resource: @@ -42,4 +39,5 @@ spec: parameters: - src: dependencyName: test-dep + dataKey: body dest: spec.arguments.parameters.0.value diff --git 
a/examples/sensors/minio.yaml b/examples/sensors/minio.yaml index 86e9a4489a..59bec87a50 100644 --- a/examples/sensors/minio.yaml +++ b/examples/sensors/minio.yaml @@ -13,9 +13,6 @@ spec: - template: name: minio-workflow-trigger k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: resource: @@ -25,19 +22,26 @@ spec: generateName: artifact-workflow-2- spec: entrypoint: whalesay + arguments: + parameters: + - name: message + # the value will get overridden by event payload from test-dep + value: THIS_WILL_BE_REPLACED templates: - name: whalesay + inputs: + parameters: + - name: message container: command: - cowsay image: docker/whalesay:latest - args: - - THIS_WILL_BE_REPLACED + args: ["{{inputs.parameters.message}}"] # The container args from the workflow are overridden by the s3 notification key parameters: - src: dependencyName: test-dep dataKey: notification.0.s3.object.key - dest: spec.templates.0.container.args.0 + dest: spec.arguments.parameters.0.value retryStrategy: steps: 3 diff --git a/examples/sensors/mqtt-sensor.yaml b/examples/sensors/mqtt-sensor.yaml index 51cb400b75..543419fade 100644 --- a/examples/sensors/mqtt-sensor.yaml +++ b/examples/sensors/mqtt-sensor.yaml @@ -13,9 +13,6 @@ spec: - template: name: mqtt-workflow-trigger k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: resource: @@ -42,4 +39,5 @@ spec: parameters: - src: dependencyName: test-dep + dataKey: body dest: spec.arguments.parameters.0.value diff --git a/examples/sensors/multi-dependencies.yaml b/examples/sensors/multi-dependencies.yaml index 5da02dd81c..dd41ba3bd4 100644 --- a/examples/sensors/multi-dependencies.yaml +++ b/examples/sensors/multi-dependencies.yaml @@ -17,9 +17,6 @@ spec: - template: name: multi-dependencies-workflow-trigger k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: resource: @@ -51,7 +48,9 @@ spec: parameters: - src: dependencyName: test-dependency-webhook + dataKey: body.a dest: spec.arguments.parameters.0.value - src: dependencyName: test-dependency-calendar + dataKey: eventTime dest: spec.arguments.parameters.1.value diff --git a/examples/sensors/multi-trigger-sensor.yaml b/examples/sensors/multi-trigger-sensor.yaml index e7bcd5f52b..c38c254d01 100644 --- a/examples/sensors/multi-trigger-sensor.yaml +++ b/examples/sensors/multi-trigger-sensor.yaml @@ -13,9 +13,6 @@ spec: - template: name: multi-trigger-workflow-1 k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: s3: @@ -33,9 +30,6 @@ spec: - template: name: multi-trigger-workflow-2 k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: resource: diff --git a/examples/sensors/nats.yaml b/examples/sensors/nats.yaml index d2bb151d41..4c73b9dcd2 100644 --- a/examples/sensors/nats.yaml +++ b/examples/sensors/nats.yaml @@ -13,9 +13,6 @@ spec: - template: name: nats-workflow-trigger k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: resource: diff --git a/examples/sensors/nsq.yaml b/examples/sensors/nsq.yaml index 0c9da6e209..885b0860a5 100644 --- a/examples/sensors/nsq.yaml +++ b/examples/sensors/nsq.yaml @@ -13,9 +13,6 @@ spec: - template: name: workflow-trigger k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: resource: @@ -42,4 +39,5 @@ spec: parameters: - src: dependencyName: test-dep + dataKey: body dest: spec.arguments.parameters.0.value 
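The `dataKey: body` additions in the hunks above hand a trigger parameter the whole event body. Where a derived value is needed instead, the same `src` block also accepts a `dataTemplate`, a Go template (with sprig functions) rendered against the event, as the github and email-trigger examples in this changeset already use. A minimal sketch, assuming a hypothetical `body.name` field that should be lowercased before substitution:

      parameters:
        - src:
            dependencyName: test-dep
            dataTemplate: "{{ .Input.body.name | lower }}"
          dest: spec.arguments.parameters.0.value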
diff --git a/examples/sensors/pulsar.yaml b/examples/sensors/pulsar.yaml index 345e302845..68274c9eea 100644 --- a/examples/sensors/pulsar.yaml +++ b/examples/sensors/pulsar.yaml @@ -13,9 +13,6 @@ spec: - template: name: workflow-trigger k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: resource: @@ -42,4 +39,5 @@ spec: parameters: - src: dependencyName: test-dep + dataKey: body dest: spec.arguments.parameters.0.value diff --git a/examples/sensors/redis-streams.yaml b/examples/sensors/redis-streams.yaml new file mode 100644 index 0000000000..d19e07cd6f --- /dev/null +++ b/examples/sensors/redis-streams.yaml @@ -0,0 +1,37 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: redis-stream +spec: + template: + serviceAccountName: operate-workflow-sa + dependencies: + - name: test-dep + eventSourceName: redis-stream + eventName: example + triggers: + - template: + name: workflow-trigger + k8s: + operation: create + source: + resource: + apiVersion: argoproj.io/v1alpha1 + kind: Workflow + metadata: + generateName: redis-stream- + spec: + entrypoint: whalesay + templates: + - container: + args: + - "hello" # it will get replaced by the event payload + command: + - cowsay + image: "docker/whalesay:latest" + name: whalesay + parameters: + - src: + dependencyName: test-dep + dataKey: values + dest: spec.templates.0.container.args.0 \ No newline at end of file diff --git a/examples/sensors/redis.yaml b/examples/sensors/redis.yaml index a63c16fb09..c62a3691ae 100644 --- a/examples/sensors/redis.yaml +++ b/examples/sensors/redis.yaml @@ -13,9 +13,6 @@ spec: - template: name: workflow-trigger k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: resource: @@ -36,4 +33,5 @@ spec: parameters: - src: dependencyName: test-dep + dataKey: body dest: spec.templates.0.container.args.0 diff --git a/examples/sensors/resource.yaml b/examples/sensors/resource.yaml index 49d8dad088..de29faa476 100644 --- a/examples/sensors/resource.yaml +++ b/examples/sensors/resource.yaml @@ -13,9 +13,6 @@ spec: - template: name: argo-workflow k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: resource: @@ -44,6 +41,7 @@ spec: parameters: - src: dependencyName: test-dep + dataKey: body dest: spec.arguments.parameters.0.value retryStrategy: steps: 3 diff --git a/examples/sensors/sftp.yaml b/examples/sensors/sftp.yaml new file mode 100644 index 0000000000..03c2168d46 --- /dev/null +++ b/examples/sensors/sftp.yaml @@ -0,0 +1,40 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: sftp +spec: + template: + serviceAccountName: operate-workflow-sa + dependencies: + - name: test-dep + eventSourceName: sftp + eventName: example + triggers: + - template: + name: sftp-workflow-trigger + k8s: + operation: create + source: + resource: + apiVersion: argoproj.io/v1alpha1 + kind: Workflow + metadata: + generateName: sftp-watcher- + spec: + entrypoint: whalesay + templates: + - + container: + args: + - "hello" # it will get replaced by the event payload + command: + - cowsay + image: "docker/whalesay:latest" + name: whalesay + parameters: + - src: + dependencyName: test-dep + dataKey: name + dest: spec.templates.0.container.args.0 + retryStrategy: + steps: 3 diff --git a/examples/sensors/slack-trigger.yaml b/examples/sensors/slack-trigger.yaml index 343c8f9f00..32436e8d67 100644 --- a/examples/sensors/slack-trigger.yaml +++ b/examples/sensors/slack-trigger.yaml @@ -8,15 +8,7 @@ spec: 
eventSourceName: webhook eventName: example triggers: - - template: - name: slack-trigger - slack: - channel: general - message: hello world - slackToken: - key: token - name: slack-secret - parameters: + - parameters: - src: dependencyName: test-dep dataKey: body.channel @@ -25,3 +17,107 @@ spec: dependencyName: test-dep dataKey: body.message dest: slack.message + template: + name: slack-trigger + slack: + slackToken: + key: token + name: slack-secret + channel: general + message: hello world + +# # For more info about Attachments API: https://api.slack.com/reference/messaging/attachments +# attachments: | +# [{ +# "title": "Attachment1!", +# "title_link": "https://argoproj.github.io/argo-events/sensors/triggers/slack-trigger/", +# "color": "#18be52", +# "fields": [{ +# "title": "Hello1", +# "value": "Hello World1", +# "short": true +# }, { +# "title": "Hello2", +# "value": "Hello World2", +# "short": true +# }] +# }, { +# "title": "Attachment2!", +# "title_link": "https://argoproj.github.io/argo-events/sensors/triggers/slack-trigger/", +# "color": "#18be52", +# "fields": [{ +# "title": "Hello1", +# "value": "Hello World1", +# "short": true +# }, { +# "title": "Hello2", +# "value": "Hello World2", +# "short": true +# }] +# }] +# +# # For more info about Blocks API: https://api.slack.com/reference/block-kit/blocks +# blocks: | +# [{ +# "type": "actions", +# "block_id": "actionblock789", +# "elements": [{ +# "type": "datepicker", +# "action_id": "datepicker123", +# "initial_date": "1990-04-28", +# "placeholder": { +# "type": "plain_text", +# "text": "Select a date" +# } +# }, +# { +# "type": "overflow", +# "options": [{ +# "text": { +# "type": "plain_text", +# "text": "*this is plain_text text*" +# }, +# "value": "value-0" +# }, +# { +# "text": { +# "type": "plain_text", +# "text": "*this is plain_text text*" +# }, +# "value": "value-1" +# }, +# { +# "text": { +# "type": "plain_text", +# "text": "*this is plain_text text*" +# }, +# "value": "value-2" +# }, +# { +# "text": { +# "type": "plain_text", +# "text": "*this is plain_text text*" +# }, +# "value": "value-3" +# }, +# { +# "text": { +# "type": "plain_text", +# "text": "*this is plain_text text*" +# }, +# "value": "value-4" +# } +# ], +# "action_id": "overflow" +# }, +# { +# "type": "button", +# "text": { +# "type": "plain_text", +# "text": "Click Me" +# }, +# "value": "click_me_123", +# "action_id": "button" +# } +# ] +# }] \ No newline at end of file diff --git a/examples/sensors/slack.yaml b/examples/sensors/slack.yaml index 6aaaf258c7..d3f4a3f374 100644 --- a/examples/sensors/slack.yaml +++ b/examples/sensors/slack.yaml @@ -13,9 +13,6 @@ spec: - template: name: slack-workflow k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: resource: @@ -42,4 +39,5 @@ spec: parameters: - src: dependencyName: test-dep + dataKey: body.message dest: spec.arguments.parameters.0.value diff --git a/examples/sensors/specia-workflow-trigger-submit-existing.yml b/examples/sensors/specia-workflow-trigger-submit-existing.yml new file mode 100644 index 0000000000..81fbaed4c8 --- /dev/null +++ b/examples/sensors/specia-workflow-trigger-submit-existing.yml @@ -0,0 +1,22 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: webhook +spec: + template: + serviceAccountName: operate-workflow-sa + dependencies: + - name: test-dep + eventSourceName: webhook + eventName: example + triggers: + - template: + name: argo-workflow-trigger + argoWorkflow: + operation: submit-from + source: + resource: + apiVersion: 
argoproj.io/v1alpha1 + kind: CronWorkflow + metadata: + name: special-trigger diff --git a/examples/sensors/special-workflow-trigger-shortened.yaml b/examples/sensors/special-workflow-trigger-shortened.yaml new file mode 100644 index 0000000000..e0bd2582e9 --- /dev/null +++ b/examples/sensors/special-workflow-trigger-shortened.yaml @@ -0,0 +1,43 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: webhook +spec: + template: + serviceAccountName: operate-workflow-sa + dependencies: + - name: test-dep + eventSourceName: webhook + eventName: example + triggers: + - template: + name: argo-workflow-trigger + argoWorkflow: + operation: submit + source: + resource: + apiVersion: argoproj.io/v1alpha1 + kind: Workflow + metadata: + name: special-trigger + spec: + entrypoint: whalesay + arguments: + parameters: + - name: message + # the value will get overridden by event payload from test-dep + value: hello world + templates: + - name: whalesay + inputs: + parameters: + - name: message + container: + image: docker/whalesay:latest + command: [cowsay] + args: ["{{inputs.parameters.message}}"] + parameters: + - src: + dependencyName: test-dep + dataKey: body + dest: spec.arguments.parameters.0.value diff --git a/examples/sensors/special-workflow-trigger.yaml b/examples/sensors/special-workflow-trigger.yaml index b20fd29edd..1087b2394e 100644 --- a/examples/sensors/special-workflow-trigger.yaml +++ b/examples/sensors/special-workflow-trigger.yaml @@ -13,10 +13,10 @@ spec: - template: name: argo-workflow-trigger argoWorkflow: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: submit + args: + - --node-field-selector + - phase=abc source: resource: apiVersion: argoproj.io/v1alpha1 @@ -42,4 +42,5 @@ spec: parameters: - src: dependencyName: test-dep + dataKey: body dest: spec.arguments.parameters.0.value diff --git a/examples/sensors/storage-grid.yaml b/examples/sensors/storage-grid.yaml index f0094029d2..83d1118a95 100644 --- a/examples/sensors/storage-grid.yaml +++ b/examples/sensors/storage-grid.yaml @@ -13,9 +13,6 @@ spec: - template: name: argo-workflow k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: resource: @@ -42,4 +39,5 @@ spec: parameters: - src: dependencyName: test-dep + dataKey: body dest: spec.arguments.parameters.0.value diff --git a/examples/sensors/stripe.yaml b/examples/sensors/stripe.yaml index 27c2d5b81d..5191f7ccae 100644 --- a/examples/sensors/stripe.yaml +++ b/examples/sensors/stripe.yaml @@ -13,9 +13,6 @@ spec: - template: name: workflow-trigger k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: resource: @@ -42,4 +39,5 @@ spec: parameters: - src: dependencyName: test-dep + dataKey: body dest: spec.arguments.parameters.0.value diff --git a/examples/sensors/time-filter-webhook.yaml b/examples/sensors/time-filter-webhook.yaml index 16e102ef0d..c4ec4d68c2 100644 --- a/examples/sensors/time-filter-webhook.yaml +++ b/examples/sensors/time-filter-webhook.yaml @@ -26,9 +26,6 @@ spec: - template: name: hello-world-workflow-trigger k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: resource: diff --git a/examples/sensors/transform.yaml b/examples/sensors/transform.yaml new file mode 100644 index 0000000000..1c08d49b8e --- /dev/null +++ b/examples/sensors/transform.yaml @@ -0,0 +1,85 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: webhook +spec: + template: + serviceAccountName: operate-workflow-sa + 
dependencies: + - name: test-dep + eventSourceName: webhook + eventName: example + transform: + script: |- + event.body.message='updated' + return event + - name: test-dep-foo + eventSourceName: webhook + eventName: example-foo + transform: + jq: ".body.message *= 2" + triggers: + - template: + name: webhook-workflow-trigger + conditions: "test-dep" + k8s: + operation: create + source: + resource: + apiVersion: argoproj.io/v1alpha1 + kind: Workflow + metadata: + generateName: webhook- + spec: + entrypoint: whalesay + arguments: + parameters: + - name: message + # the value will get overridden by event payload from test-dep + value: hello world + templates: + - name: whalesay + inputs: + parameters: + - name: message + container: + image: docker/whalesay:latest + command: [cowsay] + args: ["{{inputs.parameters.message}}"] + parameters: + - src: + dependencyName: test-dep + dataKey: body + dest: spec.arguments.parameters.0.value + - template: + name: webhook-workflow-trigger-1 + conditions: "test-dep-foo" + k8s: + operation: create + source: + resource: + apiVersion: argoproj.io/v1alpha1 + kind: Workflow + metadata: + generateName: webhook- + spec: + entrypoint: whalesay + arguments: + parameters: + - name: message + # the value will get overridden by event payload from test-dep-foo + value: hello world + templates: + - name: whalesay + inputs: + parameters: + - name: message + container: + image: docker/whalesay:latest + command: [cowsay] + args: ["{{inputs.parameters.message}}"] + parameters: + - src: + dependencyName: test-dep-foo + dataKey: body + dest: spec.arguments.parameters.0.value diff --git a/examples/sensors/trigger-source-configmap.yaml b/examples/sensors/trigger-source-configmap.yaml index 015ad8dd31..33703c0f65 100644 --- a/examples/sensors/trigger-source-configmap.yaml +++ b/examples/sensors/trigger-source-configmap.yaml @@ -13,9 +13,6 @@ spec: - template: name: workflow-trigger k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: configmap: diff --git a/examples/sensors/trigger-source-file.yaml b/examples/sensors/trigger-source-file.yaml index c32d72cf47..cab246b722 100644 --- a/examples/sensors/trigger-source-file.yaml +++ b/examples/sensors/trigger-source-file.yaml @@ -21,9 +21,6 @@ spec: - template: name: file-workflow-trigger k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: file: diff --git a/examples/sensors/trigger-source-git.yaml b/examples/sensors/trigger-source-git.yaml index 36087a2cfa..5dbb06949a 100644 --- a/examples/sensors/trigger-source-git.yaml +++ b/examples/sensors/trigger-source-git.yaml @@ -24,9 +24,6 @@ spec: - template: name: workflow-trigger-1 k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: git: @@ -44,9 +41,6 @@ spec: - template: name: workflow-trigger-2 k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: git: diff --git a/examples/sensors/trigger-standard-k8s-resource-shortened.yaml b/examples/sensors/trigger-standard-k8s-resource-shortened.yaml new file mode 100644 index 0000000000..1e7fcb44d9 --- /dev/null +++ b/examples/sensors/trigger-standard-k8s-resource-shortened.yaml @@ -0,0 +1,62 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: webhook +spec: + template: + serviceAccountName: operate-workflow-sa + dependencies: + - name: test-dep + eventSourceName: webhook + eventName: example + triggers: + - template: + name: webhook-pod-trigger + k8s: + operation:
create + source: + resource: + apiVersion: v1 + kind: Pod + metadata: + generateName: hello-world- + spec: + containers: + - name: hello-container + args: + - "hello-world" + command: + - cowsay + image: "docker/whalesay:latest" + parameters: + - src: + dependencyName: test-dep + dataKey: body + dest: spec.containers.0.args.0 +# - template: +# name: webhook-deployment-trigger +# k8s: +# operation: create +# source: +# resource: +# apiVersion: apps/v1 +# kind: Deployment +# metadata: +# generateName: hello-world- +# spec: +# replicas: 1 +# selector: +# matchLabels: +# app: mydeploy +# template: +# metadata: +# labels: +# app: mydeploy +# spec: +# containers: +# - name: hello-container +# args: +# - "hello world" +# command: +# - cowsay +# image: "docker/whalesay:latest" diff --git a/examples/sensors/trigger-standard-k8s-resource.yaml b/examples/sensors/trigger-standard-k8s-resource.yaml index a97e5cfad1..1e7fcb44d9 100644 --- a/examples/sensors/trigger-standard-k8s-resource.yaml +++ b/examples/sensors/trigger-standard-k8s-resource.yaml @@ -13,9 +13,6 @@ spec: - template: name: webhook-pod-trigger k8s: - group: "" - version: v1 - resource: pods operation: create source: resource: @@ -34,13 +31,11 @@ spec: parameters: - src: dependencyName: test-dep + dataKey: body dest: spec.containers.0.args.0 # - template: # name: webhook-deployment-trigger # k8s: -# group: apps -# version: v1 -# resource: deployments # operation: create # source: # resource: diff --git a/examples/sensors/trigger-with-atleast-once-semantics.yaml b/examples/sensors/trigger-with-atleast-once-semantics.yaml new file mode 100644 index 0000000000..d4cd3c92af --- /dev/null +++ b/examples/sensors/trigger-with-atleast-once-semantics.yaml @@ -0,0 +1,46 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: webhook +spec: + template: + serviceAccountName: operate-workflow-sa + dependencies: + - name: test-dep + eventSourceName: webhook + eventName: example + triggers: + - template: + name: trigger-1 + # trigger execution semantics is at-most-once by default + # can be changed to at-least-once by setting atLeastOnce: true + atLeastOnce: true + k8s: + operation: create + source: + resource: + apiVersion: argoproj.io/v1alpha1 + kind: Workflow + metadata: + generateName: webhook- + spec: + entrypoint: whalesay + arguments: + parameters: + - name: message + # the value will get overridden by the event payload from test-dep + value: hello world + templates: + - name: whalesay + inputs: + parameters: + - name: message + container: + image: docker/whalesay:latest + command: [cowsay] + args: ["{{inputs.parameters.message}}"] + parameters: + - src: + dependencyName: test-dep + dataKey: body + dest: spec.arguments.parameters.0.value \ No newline at end of file diff --git a/examples/sensors/trigger-with-policy.yaml b/examples/sensors/trigger-with-policy.yaml index e0d365da14..06514c89cf 100644 --- a/examples/sensors/trigger-with-policy.yaml +++ b/examples/sensors/trigger-with-policy.yaml @@ -41,9 +41,6 @@ spec: errorOnBackoffTimeout: true k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: resource: @@ -70,4 +67,5 @@ spec: parameters: - src: dependencyName: test-dep + dataKey: body dest: spec.arguments.parameters.0.value diff --git a/examples/sensors/trigger-with-template.yaml b/examples/sensors/trigger-with-template.yaml index 1ecb2e80cd..86e72ddb11 100644 --- a/examples/sensors/trigger-with-template.yaml +++ b/examples/sensors/trigger-with-template.yaml @@ -13,9 +13,6 @@ spec: - 
template: name: templated-workflow-trigger k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: resource: diff --git a/examples/sensors/url-sensor.yaml b/examples/sensors/url-sensor.yaml index b8675ae24a..d6671c73da 100644 --- a/examples/sensors/url-sensor.yaml +++ b/examples/sensors/url-sensor.yaml @@ -13,13 +13,10 @@ spec: - template: name: url-workflow-trigger k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: url: - path: "https://raw.githubusercontent.com/argoproj/argo/master/examples/hello-world.yaml" + path: "https://raw.githubusercontent.com/argoproj/argo-workflows/master/examples/hello-world.yaml" verifycert: false retryStrategy: steps: 3 diff --git a/examples/sensors/webhook-shortened.yaml b/examples/sensors/webhook-shortened.yaml new file mode 100644 index 0000000000..842ea21f3a --- /dev/null +++ b/examples/sensors/webhook-shortened.yaml @@ -0,0 +1,43 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: webhook +spec: + template: + serviceAccountName: operate-workflow-sa + dependencies: + - name: test-dep + eventSourceName: webhook + eventName: example + triggers: + - template: + name: webhook-workflow-trigger + k8s: + operation: create + source: + resource: + apiVersion: argoproj.io/v1alpha1 + kind: Workflow + metadata: + generateName: webhook- + spec: + entrypoint: whalesay + arguments: + parameters: + - name: message + # the value will get overridden by event payload from test-dep + value: hello world + templates: + - name: whalesay + inputs: + parameters: + - name: message + container: + image: docker/whalesay:latest + command: [cowsay] + args: ["{{inputs.parameters.message}}"] + parameters: + - src: + dependencyName: test-dep + dataKey: body + dest: spec.arguments.parameters.0.value diff --git a/examples/sensors/webhook.yaml b/examples/sensors/webhook.yaml index 79a3d988b9..842ea21f3a 100644 --- a/examples/sensors/webhook.yaml +++ b/examples/sensors/webhook.yaml @@ -13,9 +13,6 @@ spec: - template: name: webhook-workflow-trigger k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: resource: @@ -42,4 +39,5 @@ spec: parameters: - src: dependencyName: test-dep + dataKey: body dest: spec.arguments.parameters.0.value diff --git a/examples/tutorials/02-parameterization/sensor-01.yaml b/examples/tutorials/02-parameterization/sensor-01.yaml index bd46a341e3..bcf09045b9 100644 --- a/examples/tutorials/02-parameterization/sensor-01.yaml +++ b/examples/tutorials/02-parameterization/sensor-01.yaml @@ -12,11 +12,8 @@ spec: triggers: - template: name: webhook-workflow-trigger - k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows - operation: create + argoWorkflow: + operation: submit source: resource: apiVersion: argoproj.io/v1alpha1 diff --git a/examples/tutorials/02-parameterization/sensor-02.yaml b/examples/tutorials/02-parameterization/sensor-02.yaml index 90dec17118..488ba93ce2 100644 --- a/examples/tutorials/02-parameterization/sensor-02.yaml +++ b/examples/tutorials/02-parameterization/sensor-02.yaml @@ -12,11 +12,8 @@ spec: triggers: - template: name: webhook-workflow-trigger - k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows - operation: create + argoWorkflow: + operation: submit source: resource: apiVersion: argoproj.io/v1alpha1 diff --git a/examples/tutorials/02-parameterization/sensor-03.yaml b/examples/tutorials/02-parameterization/sensor-03.yaml index 1caf3b2c40..3e633e403f 100644 --- 
a/examples/tutorials/02-parameterization/sensor-03.yaml +++ b/examples/tutorials/02-parameterization/sensor-03.yaml @@ -12,11 +12,8 @@ spec: triggers: - template: name: webhook-workflow-trigger - k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows - operation: create + argoWorkflow: + operation: submit source: resource: apiVersion: argoproj.io/v1alpha1 diff --git a/examples/tutorials/02-parameterization/sensor-04.yaml b/examples/tutorials/02-parameterization/sensor-04.yaml index 532cfb79b3..0f66e10d86 100644 --- a/examples/tutorials/02-parameterization/sensor-04.yaml +++ b/examples/tutorials/02-parameterization/sensor-04.yaml @@ -12,11 +12,8 @@ spec: triggers: - template: name: webhook-workflow-trigger - k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows - operation: create + argoWorkflow: + operation: submit source: resource: apiVersion: argoproj.io/v1alpha1 diff --git a/examples/tutorials/02-parameterization/sensor-05.yaml b/examples/tutorials/02-parameterization/sensor-05.yaml index 077ce370af..7663b4c184 100644 --- a/examples/tutorials/02-parameterization/sensor-05.yaml +++ b/examples/tutorials/02-parameterization/sensor-05.yaml @@ -12,11 +12,8 @@ spec: triggers: - template: name: webhook-workflow-trigger - k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows - operation: create + argoWorkflow: + operation: submit source: resource: apiVersion: argoproj.io/v1alpha1 @@ -44,16 +41,16 @@ spec: dependencyName: THIS_WILL_BE_REPLACED dataKey: THIS_WILL_BE_REPLACED dest: THIS_WILL_BE_REPLACED - parameters: - - src: - dependencyName: test-dep - dataKey: body.dependencyName - dest: k8s.parameters.0.src.dependencyName - - src: - dependencyName: test-dep - dataKey: body.dataKey - dest: k8s.parameters.0.src.dataKey - - src: - dependencyName: test-dep - dataKey: dest - dest: k8s.parameters.0.dest + parameters: + - src: + dependencyName: test-dep + dataKey: body.dependencyName + dest: k8s.parameters.0.src.dependencyName + - src: + dependencyName: test-dep + dataKey: body.dataKey + dest: k8s.parameters.0.src.dataKey + - src: + dependencyName: test-dep + dataKey: body.dest + dest: k8s.parameters.0.dest diff --git a/examples/tutorials/03-trigger-sources/sensor-cm.yaml b/examples/tutorials/03-trigger-sources/sensor-cm.yaml index 10c219367d..bb87c768af 100644 --- a/examples/tutorials/03-trigger-sources/sensor-cm.yaml +++ b/examples/tutorials/03-trigger-sources/sensor-cm.yaml @@ -13,9 +13,6 @@ spec: - template: name: minio-workflow-trigger k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: configmap: diff --git a/examples/tutorials/03-trigger-sources/sensor-git.yaml b/examples/tutorials/03-trigger-sources/sensor-git.yaml index e91dc412f9..cae15da34a 100644 --- a/examples/tutorials/03-trigger-sources/sensor-git.yaml +++ b/examples/tutorials/03-trigger-sources/sensor-git.yaml @@ -33,13 +33,10 @@ spec: - template: name: workflow-trigger k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: git: - url: "git@github.com:argoproj/argo.git" + url: "git@github.com:argoproj/argo-workflows.git" cloneDirectory: "/git/argoproj" sshKeyPath: "/secret/key" namespace: argo-events diff --git a/examples/tutorials/03-trigger-sources/sensor-minio.yaml b/examples/tutorials/03-trigger-sources/sensor-minio.yaml index 880073e7be..a7ebf7b960 100644 --- a/examples/tutorials/03-trigger-sources/sensor-minio.yaml +++ b/examples/tutorials/03-trigger-sources/sensor-minio.yaml @@ -13,9 +13,6 @@ spec: - 
template: name: workflow-trigger k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: s3: diff --git a/examples/tutorials/04-standard-k8s-resources/sensor-deployment.yaml b/examples/tutorials/04-standard-k8s-resources/sensor-deployment.yaml index 23e9cafcb2..2108878a0f 100644 --- a/examples/tutorials/04-standard-k8s-resources/sensor-deployment.yaml +++ b/examples/tutorials/04-standard-k8s-resources/sensor-deployment.yaml @@ -13,9 +13,6 @@ spec: - template: name: webhook-deployment-trigger k8s: - group: apps - version: v1 - resource: deployments operation: create source: resource: diff --git a/examples/tutorials/04-standard-k8s-resources/sensor-pod.yaml b/examples/tutorials/04-standard-k8s-resources/sensor-pod.yaml index 8e0eaf1e92..f26c97dd16 100644 --- a/examples/tutorials/04-standard-k8s-resources/sensor-pod.yaml +++ b/examples/tutorials/04-standard-k8s-resources/sensor-pod.yaml @@ -13,9 +13,6 @@ spec: - template: name: webhook-pod-trigger k8s: - group: "" - version: v1 - resource: pods operation: create source: resource: diff --git a/examples/tutorials/06-trigger-conditions/sensor-01.yaml b/examples/tutorials/06-trigger-conditions/sensor-01.yaml index b9ae5f01df..71946629c2 100644 --- a/examples/tutorials/06-trigger-conditions/sensor-01.yaml +++ b/examples/tutorials/06-trigger-conditions/sensor-01.yaml @@ -20,9 +20,6 @@ spec: conditions: "test-dep-webhook" name: workflow-trigger-1 k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: resource: @@ -49,9 +46,6 @@ spec: conditions: "test-dep-minio" name: workflow-trigger-2 k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: resource: @@ -72,5 +66,5 @@ spec: parameters: - src: dependencyName: test-dep-minio - dataKey: s3.bucket.name + dataKey: notification.0.s3.bucket.name dest: spec.templates.0.container.args.0 diff --git a/examples/tutorials/06-trigger-conditions/sensor-02.yaml b/examples/tutorials/06-trigger-conditions/sensor-02.yaml index 3f904eb579..70f8d3ec73 100644 --- a/examples/tutorials/06-trigger-conditions/sensor-02.yaml +++ b/examples/tutorials/06-trigger-conditions/sensor-02.yaml @@ -18,9 +18,6 @@ spec: conditions: "test-dep-webhook && test-dep-minio" name: workflow-trigger k8s: - group: argoproj.io - version: v1alpha1 - resource: workflows operation: create source: resource: @@ -45,5 +42,5 @@ spec: dest: spec.templates.0.container.args.0 - src: dependencyName: test-dep-minio - dataKey: s3.bucket.name + dataKey: notification.0.s3.bucket.name dest: spec.templates.0.container.args.1 diff --git a/go.mod b/go.mod index 5ac9ffb47b..16326f0237 100644 --- a/go.mod +++ b/go.mod @@ -1,150 +1,341 @@ module github.com/argoproj/argo-events -go 1.15 +go 1.21 + +retract v1.15.1 // Contains retractions only. + +retract v1.15.0 // Published accidentally. 
require ( - cloud.google.com/go v0.52.0 - cloud.google.com/go/pubsub v1.2.0 - github.com/Azure/azure-amqp-common-go/v3 v3.1.0 // indirect - github.com/Azure/azure-event-hubs-go/v3 v3.3.7 - github.com/Azure/azure-sdk-for-go v52.6.0+incompatible // indirect - github.com/Azure/go-amqp v0.13.6 // indirect + cloud.google.com/go/compute/metadata v0.3.0 + cloud.google.com/go/pubsub v1.38.0 + github.com/AdaLogics/go-fuzz-headers v0.0.0-20221103172237-443f56ff4ba8 + github.com/Azure/azure-event-hubs-go/v3 v3.6.2 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 + github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.1 + github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue v1.0.0 + github.com/IBM/sarama v1.43.0 github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible - github.com/Masterminds/sprig/v3 v3.2.0 - github.com/Shopify/sarama v1.26.1 - github.com/ahmetb/gen-crd-api-reference-docs v0.2.0 - github.com/antonmedv/expr v1.8.8 + github.com/Masterminds/sprig/v3 v3.2.3 + github.com/ahmetb/gen-crd-api-reference-docs v0.3.0 + github.com/andygrunwald/go-gerrit v0.0.0-20230325081502-da63a5c62d80 + github.com/antonmedv/expr v1.15.5 github.com/apache/openwhisk-client-go v0.0.0-20190915054138-716c6f973eb2 - github.com/apache/pulsar-client-go v0.1.1 - github.com/argoproj/pkg v0.9.0 - github.com/aws/aws-sdk-go v1.33.16 + github.com/apache/pulsar-client-go v0.12.0 + github.com/argoproj/notifications-engine v0.4.0 + github.com/argoproj/pkg v0.13.6 + github.com/aws/aws-sdk-go v1.44.209 github.com/blushft/go-diagrams v0.0.0-20201006005127-c78c821223d9 - github.com/cloudevents/sdk-go/v2 v2.1.0 - github.com/cloudfoundry/jibber_jabber v0.0.0-20151120183258-bcc4c8345a21 // indirect + github.com/bradleyfalzon/ghinstallation/v2 v2.11.0 + github.com/cloudevents/sdk-go/v2 v2.15.2 github.com/colinmarc/hdfs v1.1.4-0.20180802165501-48eb8d6c34a9 - github.com/eclipse/paho.mqtt.golang v1.2.0 - github.com/emicklei/go-restful v2.12.0+incompatible // indirect + github.com/doublerebel/bellows v0.0.0-20160303004610-f177d92a03d3 + github.com/eclipse/paho.mqtt.golang v1.4.3 github.com/emitter-io/go/v2 v2.0.9 - github.com/fatih/color v1.9.0 // indirect - github.com/fsnotify/fsnotify v1.4.9 - github.com/gavv/httpexpect/v2 v2.2.0 + github.com/fsnotify/fsnotify v1.7.0 + github.com/gavv/httpexpect/v2 v2.16.0 + github.com/gfleury/go-bitbucket-v1 v0.0.0-20210707202713-7d616f7c18ac github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 - github.com/go-git/go-git/v5 v5.3.0 - github.com/go-openapi/inflect v0.19.0 - github.com/go-openapi/spec v0.20.2 - github.com/go-redis/redis v6.15.8+incompatible - github.com/go-resty/resty/v2 v2.3.0 - github.com/go-swagger/go-swagger v0.25.0 + github.com/go-git/go-git/v5 v5.12.0 + github.com/go-openapi/inflect v0.21.0 + github.com/go-redis/redis/v8 v8.11.5 + github.com/go-resty/resty/v2 v2.13.1 + github.com/go-swagger/go-swagger v0.31.0 github.com/gobwas/glob v0.2.4-0.20181002190808-e7a84e9525fe github.com/gogo/protobuf v1.3.2 - github.com/golang/protobuf v1.4.3 - github.com/google/go-cmp v0.5.2 - github.com/google/go-github/v31 v31.0.0 - github.com/google/uuid v1.1.2 - github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 // indirect - github.com/gorilla/mux v1.7.3 - github.com/grpc-ecosystem/grpc-gateway v1.14.6 - github.com/hokaccha/go-prettyjson v0.0.0-20190818114111-108c894c2c0e // indirect - github.com/imdario/mergo v0.3.12 + github.com/golang/protobuf v1.5.4 + github.com/google/go-cmp v0.6.0 + github.com/google/go-github/v50 v50.2.0 + 
github.com/google/uuid v1.6.0 + github.com/gorilla/mux v1.8.1 + github.com/grpc-ecosystem/grpc-gateway v1.16.0 + github.com/hamba/avro v1.8.0 + github.com/imdario/mergo v0.3.16 + github.com/itchyny/gojq v0.12.16 github.com/joncalhoun/qson v0.0.0-20200422171543-84433dcd3da0 - github.com/mattn/go-colorable v0.1.6 // indirect - github.com/minio/minio-go v1.0.1-0.20190523192347-c6c2912aa552 - github.com/mitchellh/mapstructure v1.4.1 // indirect - github.com/mitchellh/reflectwalk v1.0.1 // indirect - github.com/nats-io/gnatsd v1.4.1 // indirect - github.com/nats-io/go-nats v1.7.2 - github.com/nats-io/graft v0.0.0-20200605173148-348798afea05 - github.com/nats-io/nats-streaming-server v0.17.0 // indirect - github.com/nats-io/nats.go v1.10.0 - github.com/nats-io/stan.go v0.6.0 - github.com/nicksnyder/go-i18n v1.10.1-0.20190510212457-b280125b035a // indirect - github.com/nsqio/go-nsq v1.0.8 - github.com/pierrec/lz4 v2.5.0+incompatible // indirect + github.com/ktrysmt/go-bitbucket v0.9.80 + github.com/minio/minio-go/v7 v7.0.70 + github.com/mitchellh/hashstructure/v2 v2.0.2 + github.com/mitchellh/mapstructure v1.5.0 + github.com/nats-io/graft v0.0.0-20220215174245-93d18541496f + github.com/nats-io/nats.go v1.35.0 + github.com/nats-io/stan.go v0.10.4 + github.com/nsqio/go-nsq v1.1.0 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.9.0 + github.com/pkg/sftp v1.13.6 + github.com/prometheus/client_golang v1.19.1 + github.com/rabbitmq/amqp091-go v1.10.0 github.com/radovskyb/watcher v1.0.7 - github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect - github.com/robfig/cron v1.2.0 - github.com/slack-go/slack v0.7.4 - github.com/smartystreets/assertions v0.0.0-20190401211740-f487f9de1cd3 // indirect - github.com/smartystreets/goconvey v1.6.4 - github.com/spf13/cobra v1.0.0 - github.com/spf13/viper v1.7.0 - github.com/streadway/amqp v1.0.0 - github.com/stretchr/testify v1.6.1 + github.com/riferrei/srclient v0.5.4 + github.com/robfig/cron/v3 v3.0.1 + github.com/slack-go/slack v0.13.0 + github.com/smartystreets/goconvey v1.7.2 + github.com/spf13/cobra v1.8.0 + github.com/spf13/viper v1.19.0 + github.com/stretchr/testify v1.9.0 github.com/stripe/stripe-go v70.15.0+incompatible - github.com/tidwall/gjson v1.7.5 - github.com/tidwall/sjson v1.1.1 - github.com/xanzy/go-gitlab v0.33.0 - go.uber.org/zap v1.15.0 - golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 - golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect - google.golang.org/api v0.15.1 - google.golang.org/grpc v1.29.1 - gopkg.in/jcmturner/goidentity.v2 v2.0.0 // indirect + github.com/tidwall/gjson v1.17.1 + github.com/tidwall/sjson v1.2.4 + github.com/xanzy/go-gitlab v0.105.0 + github.com/xdg-go/scram v1.1.2 + github.com/yuin/gopher-lua v1.1.1 + go.uber.org/ratelimit v0.3.1 + go.uber.org/zap v1.27.0 + golang.org/x/crypto v0.24.0 + golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f + google.golang.org/api v0.181.0 + google.golang.org/grpc v1.63.2 gopkg.in/jcmturner/gokrb5.v5 v5.3.0 - gopkg.in/jcmturner/rpc.v0 v0.0.2 // indirect - honnef.co/go/tools v0.0.1-2020.1.3 // indirect - k8s.io/api v0.19.6 - k8s.io/apimachinery v0.19.6 - k8s.io/client-go v0.19.6 - k8s.io/code-generator v0.19.6 - k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14 - k8s.io/klog v0.3.0 // indirect - k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 - sigs.k8s.io/controller-runtime v0.7.0 - sigs.k8s.io/controller-tools v0.4.1 - sigs.k8s.io/yaml v1.2.0 + k8s.io/api v0.29.2 + k8s.io/apimachinery v0.29.2 + k8s.io/client-go v0.29.0 
+ k8s.io/code-generator v0.29.2 + k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 + k8s.io/klog/v2 v2.110.1 + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 + k8s.io/utils v0.0.0-20230726121419-3b25d923346b + sigs.k8s.io/controller-runtime v0.17.2 + sigs.k8s.io/controller-tools v0.8.0 + sigs.k8s.io/yaml v1.4.0 ) -replace k8s.io/api => k8s.io/api v0.19.6 - -replace k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.19.6 - -replace k8s.io/apimachinery => k8s.io/apimachinery v0.19.7-rc.0 - -replace k8s.io/apiserver => k8s.io/apiserver v0.19.6 - -replace k8s.io/cli-runtime => k8s.io/cli-runtime v0.19.6 - -replace k8s.io/client-go => k8s.io/client-go v0.19.6 - -replace k8s.io/cloud-provider => k8s.io/cloud-provider v0.19.6 - -replace k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.19.6 - -replace k8s.io/component-base => k8s.io/component-base v0.19.6 - -replace k8s.io/cri-api => k8s.io/cri-api v0.19.7-rc.0 - -replace k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.19.6 - -replace k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.19.6 - -replace k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.19.6 - -replace k8s.io/kube-proxy => k8s.io/kube-proxy v0.19.6 - -replace k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.19.6 - -replace k8s.io/kubectl => k8s.io/kubectl v0.19.6 - -replace k8s.io/kubelet => k8s.io/kubelet v0.19.6 - -replace k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.19.6 - -replace k8s.io/metrics => k8s.io/metrics v0.19.6 - -replace k8s.io/node-api => k8s.io/node-api v0.17.5 - -replace k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.19.6 - -replace k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.19.6 - -replace k8s.io/sample-controller => k8s.io/sample-controller v0.19.6 - -replace github.com/Azure/go-autorest => github.com/Azure/go-autorest v14.2.0+incompatible - -replace k8s.io/code-generator => k8s.io/code-generator v0.19.7-rc.0 +require ( + cloud.google.com/go v0.113.0 // indirect + cloud.google.com/go/auth v0.4.1 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect + cloud.google.com/go/iam v1.1.7 // indirect + dario.cat/mergo v1.0.0 // indirect + github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect + github.com/99designs/keyring v1.2.1 // indirect + github.com/AthenZ/athenz v1.10.39 // indirect + github.com/Azure/azure-amqp-common-go/v4 v4.2.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.7.0 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect + github.com/DataDog/zstd v1.5.0 // indirect + github.com/PagerDuty/go-pagerduty v1.6.0 // indirect + github.com/RocketChat/Rocket.Chat.Go.SDK v0.0.0-20210112200207-10ab4d695d60 // indirect + github.com/TylerBrock/colorjson v0.0.0-20200706003622-8a50f05110d2 // indirect + github.com/andybalholm/brotli v1.0.4 // indirect + github.com/bits-and-blooms/bitset v1.4.0 // indirect + github.com/cloudflare/circl v1.3.7 // indirect + github.com/cyphar/filepath-securejoin v0.2.4 // indirect + github.com/danieljoos/wincred v1.1.2 // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect + github.com/dvsekhvalnov/jose2go v1.6.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/spec v0.21.0 // indirect + 
github.com/go-telegram-bot-api/telegram-bot-api/v5 v5.5.1 // indirect + github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect + github.com/golang-jwt/jwt v3.2.2+incompatible // indirect + github.com/golang-jwt/jwt/v5 v5.2.1 // indirect + github.com/google/go-github/v41 v41.0.0 // indirect + github.com/google/s2a-go v0.1.7 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect + github.com/gregdel/pushover v1.1.0 // indirect + github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/klauspost/cpuid/v2 v2.2.6 // indirect + github.com/kr/fs v0.1.0 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect + github.com/linkedin/goavro/v2 v2.11.1 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/mtibben/percent v0.2.1 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/oklog/ulid v1.3.1 // indirect + github.com/oliveagle/jsonpath v0.0.0-20180606110733-2e52cf6e6852 // indirect + github.com/opsgenie/opsgenie-go-sdk-v2 v1.0.5 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect + github.com/pierrec/lz4/v4 v4.1.21 // indirect + github.com/pjbgf/sha1cd v0.3.0 // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sanity-io/litter v1.5.5 // indirect + github.com/santhosh-tekuri/jsonschema/v5 v5.0.0 // indirect + github.com/skeema/knownhosts v1.2.2 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/whilp/git-urls v0.0.0-20191001220047-6db9661140c0 // indirect + github.com/xdg-go/pbkdf2 v1.0.0 // indirect + github.com/xdg-go/stringprep v1.0.4 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect + go.opentelemetry.io/otel v1.24.0 // indirect + go.opentelemetry.io/otel/metric v1.24.0 // indirect + go.opentelemetry.io/otel/trace v1.24.0 // indirect + gomodules.xyz/envconfig v1.3.1-0.20190308184047-426f31af0d45 // indirect + gomodules.xyz/notify v0.1.1 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8 // indirect + gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc // indirect + gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df // indirect + moul.io/http2curl/v2 v2.3.0 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect +) -replace k8s.io/controller-manager => k8s.io/controller-manager v0.19.7-rc.0 +require ( + github.com/Azure/azure-sdk-for-go v65.0.0+incompatible // indirect + github.com/Azure/go-amqp v1.0.5 // indirect + github.com/Azure/go-autorest v14.2.0+incompatible // indirect + github.com/Azure/go-autorest/autorest v0.11.28 // indirect + github.com/Azure/go-autorest/autorest/adal v0.9.21 // indirect + github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect + github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect + github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect + github.com/Azure/go-autorest/logger v0.2.1 // indirect + github.com/Azure/go-autorest/tracing v0.6.0 // indirect + 
github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/semver/v3 v3.2.1 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect + github.com/ProtonMail/go-crypto v1.0.0 // indirect + github.com/ajg/form v1.5.1 // indirect + github.com/ardielle/ardielle-go v1.5.2 // indirect + github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect + github.com/awalterschulze/gographviz v0.0.0-20200901124122-0eecad45bd71 // indirect + github.com/benbjohnson/clock v1.3.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cloudfoundry/jibber_jabber v0.0.0-20151120183258-bcc4c8345a21 // indirect + github.com/danielm-codefresh/argo-multi-cluster v0.0.0-20220327145759-1c387c8ebc5f + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/devigned/tab v0.1.1 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/eapache/go-resiliency v1.6.0 // indirect + github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect + github.com/eapache/queue v1.1.0 // indirect + github.com/emirpasic/gods v1.18.1 // indirect + github.com/evanphx/json-patch v5.6.0+incompatible // indirect + github.com/evanphx/json-patch/v5 v5.8.0 // indirect + github.com/fatih/color v1.15.0 // indirect + github.com/fatih/structs v1.1.0 // indirect + github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect + github.com/go-git/go-billy/v5 v5.5.0 // indirect + github.com/go-logr/logr v1.4.1 // indirect + github.com/go-openapi/analysis v0.23.0 // indirect + github.com/go-openapi/errors v0.22.0 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/loads v0.22.0 // indirect + github.com/go-openapi/runtime v0.28.0 // indirect + github.com/go-openapi/strfmt v0.23.0 // indirect + github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-openapi/validate v0.24.0 // indirect + github.com/gobuffalo/flect v0.2.3 // indirect + github.com/goccy/go-json v0.10.2 // indirect + github.com/golang-jwt/jwt/v4 v4.5.0 // indirect + github.com/golang/glog v1.2.0 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-github/v62 v62.0.0 // indirect + github.com/google/go-querystring v1.1.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/googleapis/gax-go/v2 v2.12.4 // indirect + github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 // indirect + github.com/gorilla/handlers v1.5.2 // indirect + github.com/gorilla/websocket v1.5.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-retryablehttp v0.7.2 // indirect + github.com/hashicorp/go-uuid v1.0.3 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/hokaccha/go-prettyjson v0.0.0-20211117102719-0474bc63780f // indirect + github.com/huandu/xstrings v1.4.0 // indirect + github.com/imkira/go-interpol v1.1.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/itchyny/timefmt-go v0.1.6 // indirect + github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect + github.com/jcmturner/aescts/v2 v2.0.0 // indirect + github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect + github.com/jcmturner/gofork v1.7.6 // indirect + 
github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect + github.com/jcmturner/rpc/v2 v2.0.3 // indirect + github.com/jessevdk/go-flags v1.5.0 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/jpillora/backoff v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/jtolds/gls v4.20.0+incompatible // indirect + github.com/kevinburke/ssh_config v1.2.0 // indirect + github.com/klauspost/compress v1.17.7 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/minio/md5-simd v1.1.2 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/moby/spdystream v0.2.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/nats-io/nats-server/v2 v2.9.23 // indirect + github.com/nats-io/nats-streaming-server v0.24.3 // indirect + github.com/nats-io/nkeys v0.4.7 // indirect + github.com/nats-io/nuid v1.0.1 // indirect + github.com/nicksnyder/go-i18n v1.10.1-0.20190510212457-b280125b035a // indirect + github.com/pelletier/go-toml v1.9.5 // indirect + github.com/pierrec/lz4 v2.6.1+incompatible // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/common v0.48.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect + github.com/rogpeppe/go-internal v1.12.0 // indirect + github.com/rs/xid v1.5.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect + github.com/shopspring/decimal v1.3.1 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/smartystreets/assertions v1.2.0 // indirect + github.com/spaolacci/murmur3 v1.1.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.6.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.0 // indirect + github.com/toqueteos/webbrowser v1.2.0 // indirect + github.com/valyala/bytebufferpool v1.0.0 // indirect + github.com/valyala/fasthttp v1.34.0 // indirect + github.com/xanzy/ssh-agent v0.3.3 // indirect + github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect + github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect + github.com/xeipuuv/gojsonschema v1.2.0 // indirect + github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0 // indirect + github.com/yudai/gojsondiff v1.0.0 // indirect + github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 // indirect + go.mongodb.org/mongo-driver v1.14.0 // indirect + go.opencensus.io v0.24.0 // indirect + go.uber.org/atomic v1.10.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.25.0 // indirect + golang.org/x/oauth2 v0.20.0 // indirect + 
golang.org/x/sync v0.7.0 // indirect + golang.org/x/sys v0.21.0 // indirect + golang.org/x/term v0.21.0 // indirect + golang.org/x/text v0.16.0 // indirect + golang.org/x/time v0.5.0 // indirect + golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda // indirect + google.golang.org/protobuf v1.34.1 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect + gopkg.in/jcmturner/dnsutils.v1 v1.0.1 // indirect + gopkg.in/jcmturner/goidentity.v2 v2.0.0 // indirect + gopkg.in/jcmturner/rpc.v0 v0.0.2 // indirect + gopkg.in/warnings.v0 v0.1.2 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/apiextensions-apiserver v0.29.0 // indirect + k8s.io/component-base v0.29.0 // indirect + k8s.io/klog v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect +) diff --git a/go.sum b/go.sum index 8673c6e70b..342bfc6315 100644 --- a/go.sum +++ b/go.sum @@ -1,468 +1,376 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= -cloud.google.com/go v0.52.0 h1:GGslhk/BU052LPlnI1vpp3fcbUs+hQ3E+Doti/3/vF8= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0 h1:xE3CPsOgttP4ACBePh79zTKALtXwn/Edhcr16R5hMWU= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/datastore v1.0.0 h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0 h1:Lpy6hKgdcl7a3WGSfJIFmxmcdjSpP6OmBEfcOv1Y680= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0 h1:RPUcBvDeYgQFMfQu1eBMq6piD1SXmLH+vK3qjewZPus= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go v0.113.0 h1:g3C70mn3lWfckKBiCVsAshabrDg01pQ0pnX1MNtnMkA= +cloud.google.com/go v0.113.0/go.mod h1:glEqlogERKYeePz6ZdkcLJ28Q2I6aERgDDErBg9GzO8= +cloud.google.com/go/auth v0.4.1 h1:Z7YNIhlWRtrnKlZke7z3GMqzvuYzdc2z98F9D1NV5Hg= 
+cloud.google.com/go/auth v0.4.1/go.mod h1:QVBuVEKpCn4Zp58hzRGvL0tjRGU0YqdRTdCHM1IHnro= +cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= +cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= +cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= +cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/iam v1.1.7 h1:z4VHOhwKLF/+UYXAJDFwGtNF0b6gjsW1Pk9Ml0U/IoM= +cloud.google.com/go/iam v1.1.7/go.mod h1:J4PMPg8TtyurAUvSmPj8FF3EDgY1SPRZxcUGrn7WXGA= +cloud.google.com/go/kms v1.15.8 h1:szIeDCowID8th2i8XE4uRev5PMxQFqW+JjwYxL9h6xs= +cloud.google.com/go/kms v1.15.8/go.mod h1:WoUHcDjD9pluCg7pNds131awnH429QGvRM3N/4MyoVs= +cloud.google.com/go/pubsub v1.38.0 h1:J1OT7h51ifATIedjqk/uBNPh+1hkvUaH4VKbz4UuAsc= +cloud.google.com/go/pubsub v1.38.0/go.mod h1:IPMJSWSus/cu57UyR01Jqa/bNOQA+XnPF6Z4dKW4fAA= +dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-amqp-common-go/v3 v3.0.1/go.mod h1:PBIGdzcO1teYoufTKMcGibdKaYZv4avS+O6LNIp8bq0= -github.com/Azure/azure-amqp-common-go/v3 v3.1.0 h1:1N4YSkWYWffOpQHromYdOucBSQXhNRKzqtgICy6To8Q= -github.com/Azure/azure-amqp-common-go/v3 v3.1.0/go.mod h1:PBIGdzcO1teYoufTKMcGibdKaYZv4avS+O6LNIp8bq0= -github.com/Azure/azure-event-hubs-go/v3 v3.3.7 h1:xOUxw5zVLnLX8VxS1/exhK1zZsmcoQio7Lzs6xOCIFE= -github.com/Azure/azure-event-hubs-go/v3 v3.3.7/go.mod h1:sszMsQpFy8Au2s2NColbnJY8lRVm1koW0XxBJ3rN5TY= -github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= -github.com/Azure/azure-pipeline-go v0.1.9/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= -github.com/Azure/azure-sdk-for-go v37.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v51.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v52.6.0+incompatible h1:F/feBa+/Oxbu+Zprnsiq0b6rvUVlOEx3jSqCSNdtF3U= -github.com/Azure/azure-sdk-for-go v52.6.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-storage-blob-go v0.6.0/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y= -github.com/Azure/go-amqp v0.13.0/go.mod h1:qj+o8xPCz9tMSbQ83Vp8boHahuRDl5mkNHyt1xlxUTs= -github.com/Azure/go-amqp v0.13.1/go.mod h1:qj+o8xPCz9tMSbQ83Vp8boHahuRDl5mkNHyt1xlxUTs= -github.com/Azure/go-amqp v0.13.6 h1:CWjyY59Iyc1sO/fE/AubMLMWf5id+Uiw/ph0bZzG9Ns= -github.com/Azure/go-amqp v0.13.6/go.mod h1:wbpCKA8tR5MLgRyIu+bb+S6ECdIDdYJ0NlpFE9xsBPI= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= +github.com/99designs/keyring v1.2.1 h1:tYLp1ULvO7i3fI5vE21ReQuj99QFSs7lGm0xWyJo87o= +github.com/99designs/keyring v1.2.1/go.mod h1:fc+wB5KTk9wQ9sDx0kFXB3A0MaeGHM9AwRStKOQ5vOA= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20221103172237-443f56ff4ba8 h1:d+pBUmsteW5tM87xmVXHZ4+LibHRFn40SPAoZJOg2ak= +github.com/AdaLogics/go-fuzz-headers 
v0.0.0-20221103172237-443f56ff4ba8/go.mod h1:i9fr2JpcEcY/IHEvzCM3qXUZYOQHgR89dt4es1CgMhc= +github.com/AthenZ/athenz v1.10.39 h1:mtwHTF/v62ewY2Z5KWhuZgVXftBej1/Tn80zx4DcawY= +github.com/AthenZ/athenz v1.10.39/go.mod h1:3Tg8HLsiQZp81BJY58JBeU2BR6B/H4/0MQGfCwhHNEA= +github.com/Azure/azure-amqp-common-go/v4 v4.2.0 h1:q/jLx1KJ8xeI8XGfkOWMN9XrXzAfVTkyvCxPvHCjd2I= +github.com/Azure/azure-amqp-common-go/v4 v4.2.0/go.mod h1:GD3m/WPPma+621UaU6KNjKEo5Hl09z86viKwQjTpV0Q= +github.com/Azure/azure-event-hubs-go/v3 v3.6.2 h1:7rNj1/iqS/i3mUKokA2n2eMYO72TB7lO7OmpbKoakKY= +github.com/Azure/azure-event-hubs-go/v3 v3.6.2/go.mod h1:n+ocYr9j2JCLYqUqz9eI+lx/TEAtL/g6rZzyTFSuIpc= +github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= +github.com/Azure/azure-sdk-for-go v65.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1 h1:E+OJmp2tPvt1W+amx48v1eqbjDYsgN+RzP4q16yV5eM= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.11.1/go.mod h1:a6xsAQUZg+VsS3TJ05SRp524Hs4pZ/AeFSr5ENf0Yjo= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2 h1:FDif4R1+UUR+00q6wquyX90K7A8dN+R5E8GEadoP7sU= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.2/go.mod h1:aiYBYui4BJ/BJCAIKs92XiPyQfTaBWqvHujDwKb6CBU= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.7.0 h1:rTfKOCZGy5ViVrlA74ZPE99a+SgoEE2K/yg3RyW9dFA= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.7.0/go.mod h1:4OG6tQ9EOP/MT0NMjDlRzWoVFxfu9rN9B2X+tlSVktg= +github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.1 h1:o/Ws6bEqMeKZUfj1RRm3mQ51O8JGU5w+Qdg2AhHib6A= +github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus v1.7.1/go.mod h1:6QAMYBAbQeeKX+REFJMZ1nFWu9XLw/PPcjYpuc9RDFs= +github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue v1.0.0 h1:lJwNFV+xYjHREUTHJKx/ZF6CJSt9znxmLw9DqSTvyRU= +github.com/Azure/azure-sdk-for-go/sdk/storage/azqueue v1.0.0/go.mod h1:GfT0aGew8Qj5yiQVqOO5v7N8fanbJGyUoHqXg56qcVY= +github.com/Azure/go-amqp v1.0.5 h1:po5+ljlcNSU8xtapHTe8gIc8yHxCzC03E8afH2g1ftU= +github.com/Azure/go-amqp v1.0.5/go.mod h1:vZAogwdrkbyK3Mla8m/CxSc/aKdnTZ4IbPxl51Y5WZE= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= -github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= -github.com/Azure/go-autorest/autorest v0.11.3/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= -github.com/Azure/go-autorest/autorest v0.11.18 h1:90Y4srNYrwOtAgVo3ndrQkTYn6kf1Eg/AjTFJ8Is2aM= -github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= +github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM= +github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= -github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod 
h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= -github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q= -github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= +github.com/Azure/go-autorest/autorest/adal v0.9.21 h1:jjQnVFXPfekaqb8vIsv2G1lxshoW+oGv4MDlhRtnYZk= +github.com/Azure/go-autorest/autorest/adal v0.9.21/go.mod h1:zua7mBUaCc5YnSLKYgGJR/w5ePdMDA6H56upLsHzA9U= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 h1:iM6UAvjR97ZIeR93qTcwpKNMpV+/FTWjwEbuPD495Tk= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= github.com/Azure/go-autorest/autorest/azure/cli v0.3.1 h1:LXl088ZQlP0SBppGFsRZonW6hSvwgL5gRByMbvUbx8U= github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= +github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw= +github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= -github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0 
h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/DataDog/zstd v1.5.0 h1:+K/VEwIAaPcHiMtQvpLD4lqW7f0Gk3xdYZmI1hD+CXo= +github.com/DataDog/zstd v1.5.0/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/IBM/sarama v1.43.0 h1:YFFDn8mMI2QL0wOrG0J2sFoVIAFl7hS9JQi2YZsXtJc= +github.com/IBM/sarama v1.43.0/go.mod h1:zlE6HEbC/SMQ9mhEYaF7nNLYOUyrs0obySKCckWP9BM= +github.com/Jeffail/gabs v1.4.0 h1://5fYRRTq1edjfIrQGvdkcd22pkYUrHZ5YC/H2GJVAo= +github.com/Jeffail/gabs v1.4.0/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible h1:1G1pk05UrOh0NlF1oeaaix1x8XzrfjIDK47TY0Zehcw= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/Masterminds/goutils v1.1.0 h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg= -github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= -github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Masterminds/sprig/v3 v3.2.0 h1:P1ekkbuU73Ui/wS0nK1HOM37hh4xdfZo485UPf8rc+Y= -github.com/Masterminds/sprig/v3 v3.2.0/go.mod h1:tWhwTbUTndesPNeF0C900vKoq283u6zp4APT9vaF3SI= -github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= -github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk= -github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= +github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= +github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/OneOfOne/xxhash v1.2.2/go.mod 
h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PagerDuty/go-pagerduty v1.6.0 h1:am81SzvG5Pw+s3JZ5yEy6kGvsXXklTNRrGr3d8WKpsU= +github.com/PagerDuty/go-pagerduty v1.6.0/go.mod h1:7eaBLzsDpK7VUvU0SJ5mohczQkoWrrr5CjDaw5gh1as= +github.com/ProtonMail/go-crypto v1.0.0 h1:LRuvITjQWX+WIfr930YHG2HNfjR1uOfyf5vE0kC2U78= +github.com/ProtonMail/go-crypto v1.0.0/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/sarama v1.26.1 h1:3jnfWKD7gVwbB1KSy/lE0szA9duPuSFLViK0o/d3DgA= -github.com/Shopify/sarama v1.26.1/go.mod h1:NbSGBSSndYaIhRcBtY9V0U7AyH+x71bG668AuWys/yU= -github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/RocketChat/Rocket.Chat.Go.SDK v0.0.0-20210112200207-10ab4d695d60 h1:prBTRx78AQnXzivNT9Crhu564W/zPPr3ibSlpT9xKcE= +github.com/RocketChat/Rocket.Chat.Go.SDK v0.0.0-20210112200207-10ab4d695d60/go.mod h1:rjP7sIipbZcagro/6TCk6X0ZeFT2eyudH5+fve/cbBA= +github.com/TylerBrock/colorjson v0.0.0-20200706003622-8a50f05110d2 h1:ZBbLwSJqkHBuFDA6DUhhse0IGJ7T5bemHyNILUjvOq4= +github.com/TylerBrock/colorjson v0.0.0-20200706003622-8a50f05110d2/go.mod h1:VSw57q4QFiWDbRnjdX8Cb3Ow0SFncRw+bA/ofY6Q83w= github.com/UnnoTed/fileb0x v1.1.4/go.mod h1:X59xXT18tdNk/D6j+KZySratBsuKJauMtVuJ9cgOiZs= -github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= -github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= -github.com/ahmetb/gen-crd-api-reference-docs v0.2.0 h1:YI/cAcRdNAHArfhGKcmCY5qMa32k/UyCZagLgabC5JY= -github.com/ahmetb/gen-crd-api-reference-docs v0.2.0/go.mod h1:P/XzJ+c2+khJKNKABcm2biRwk2QAuwbLf8DlXuaL7WM= +github.com/ahmetb/gen-crd-api-reference-docs v0.3.0 h1:+XfOU14S4bGuwyvCijJwhhBIjYN+YXS18jrCY2EzJaY= +github.com/ahmetb/gen-crd-api-reference-docs v0.3.0/go.mod h1:TdjdkYhlOifCQWPs1UdTma97kQQMozf5h26hTuG70u8= github.com/ajg/form v1.5.1 h1:t9c7v8JUKu/XxOGBU0yjNpaMloxGEJhUkqFRq0ibGeU= github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= -github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs= -github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= 
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= +github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= +github.com/andygrunwald/go-gerrit v0.0.0-20230325081502-da63a5c62d80 h1:7GeFYBed/nHbicP+zd2xzKHUjJnTyn3Ljgme8FXJykQ= +github.com/andygrunwald/go-gerrit v0.0.0-20230325081502-da63a5c62d80/go.mod h1:SeP12EkHZxEVjuJ2HZET304NBtHGG2X6w2Gzd0QXAZw= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= +github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/antonmedv/expr v1.8.8 h1:uVwIkIBNO2yn4vY2u2DQUqXTmv9jEEMCEcHa19G5weY= -github.com/antonmedv/expr v1.8.8/go.mod h1:5qsM3oLGDND7sDmQGDXHkYfkjYMUX14qsgqmHhwGEk8= +github.com/antonmedv/expr v1.15.5 h1:y0Iz3cEwmpRz5/r3w4qQR0MfIqJGdGM1zbhD/v0G5Vg= +github.com/antonmedv/expr v1.15.5/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE= github.com/apache/openwhisk-client-go v0.0.0-20190915054138-716c6f973eb2 h1:mOsBfI/27csXzqNYu7XAf14RPGsRrcXJ8fjaYIhkuVU= github.com/apache/openwhisk-client-go v0.0.0-20190915054138-716c6f973eb2/go.mod h1:jLLKYP7+1+LFlIJW1n9U1gqeveLM1HIwa4ZHNOFxjPw= -github.com/apache/pulsar-client-go v0.1.1 h1:v/kU+2ZCC6yFIcbZrFtWa9/nvVzVr18L+xYJUvZSxEQ= -github.com/apache/pulsar-client-go v0.1.1/go.mod h1:mlxC65KL1BLhGO2bnT9zWMttVzR2czVPb27D477YpyU= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/pulsar-client-go v0.12.0 h1:rrMlwpr6IgLRPXLRRh2vSlcw5tGV2PUSjZwmqgh2B2I= +github.com/apache/pulsar-client-go v0.12.0/go.mod h1:dkutuH4oS2pXiGm+Ti7fQZ4MRjrMPZ8IJeEGAWMeckk= +github.com/appscode/go v0.0.0-20191119085241-0887d8ec2ecc/go.mod h1:OawnOmAL4ZX3YaPdN+8HTNwBveT1jMsqP74moa9XUbE= github.com/ardielle/ardielle-go v1.5.2 h1:TilHTpHIQJ27R1Tl/iITBzMwiUGSlVfiVhwDNGM3Zj4= github.com/ardielle/ardielle-go v1.5.2/go.mod h1:I4hy1n795cUhaVt/ojz83SNVCYIGsAFAONtv2Dr7HUI= github.com/ardielle/ardielle-tools v1.5.4/go.mod h1:oZN+JRMnqGiIhrzkRN9l26Cej9dEx4jeNG6A+AdkShk= -github.com/argoproj/pkg v0.9.0 h1:PfWWYykfcEQdN0g41XLbVh/aonTjD+dPkvDp3hwpLYM= -github.com/argoproj/pkg v0.9.0/go.mod h1:ra+bQPmbVAoEL+gYSKesuigt4m49i3Qa3mE/xQcjCiA= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-metrics 
v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM= +github.com/argoproj/notifications-engine v0.4.0 h1:XyE4jAw0oeRQKL9vlDQBnycmqhN7EIqUdWgPsSUqnkQ= +github.com/argoproj/notifications-engine v0.4.0/go.mod h1:uGas18+DbCCwjif1zSwWWuwR0xJ18FXF+c2dkhPbF2k= +github.com/argoproj/pkg v0.13.6 h1:36WPD9MNYECHcO1/R1pj6teYspiK7uMQLCgLGft2abM= +github.com/argoproj/pkg v0.13.6/go.mod h1:I698DoJBKuvNFaixh4vFl2C88cNIT1WS7KCbz5ewyF8= github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= +github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= -github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= -github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY= -github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/awalterschulze/gographviz v0.0.0-20200901124122-0eecad45bd71 h1:m3N1Fv5vE5IcxuTOGFGGV0grrVFHV8UY2SV0wSBXAC8= github.com/awalterschulze/gographviz v0.0.0-20200901124122-0eecad45bd71/go.mod h1:/ynarkO/43wP/JM2Okn61e8WFMtdbtA8he7GJxW+SFM= -github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.33.16 h1:h/3BL2BQMEbS67BPoEo/5jD8IPGVrKBmoa4S9mBBntw= -github.com/aws/aws-sdk-go v1.33.16/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= -github.com/beefsack/go-rate v0.0.0-20180408011153-efa7637bb9b6/go.mod h1:6YNgTHLutezwnBvyneBbwvB8C82y3dcoOj5EQJIdGXA= +github.com/aws/aws-sdk-go v1.32.6/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aws/aws-sdk-go v1.44.39/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.44.209 h1:wZuiaA4eaqYZmoZXqGgNHqVD7y7kUGFvACDGBgowTps= +github.com/aws/aws-sdk-go v1.44.209/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= +github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks 
v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/bits-and-blooms/bitset v1.4.0 h1:+YZ8ePm+He2pU3dZlIZiOeAKfrBkXi1lSrXJ/Xzgbu8= +github.com/bits-and-blooms/bitset v1.4.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/blushft/go-diagrams v0.0.0-20201006005127-c78c821223d9 h1:mV+hh0rMjzrhg7Jc/GKwpa+y/0BMHGOHdM9yY1GYyFI= github.com/blushft/go-diagrams v0.0.0-20201006005127-c78c821223d9/go.mod h1:nDeXEIaeDV+mAK1gBD3/RJH67DYPC0GdaznWN7sB07s= github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= -github.com/bmizerany/perks v0.0.0-20141205001514-d9a9656a3a4b/go.mod h1:ac9efd0D1fsDb3EJvhqgXRbFx7bs2wqZ10HQPeU8U/Q= -github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= -github.com/boynton/repl v0.0.0-20170116235056-348863958e3e/go.mod h1:Crc/GCZ3NXDVCio7Yr0o+SSrytpcFhLmVCIzi0s49t4= -github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/bradleyfalzon/ghinstallation/v2 v2.11.0 h1:R9d0v+iobRHSaE4wKUnXFiZp53AL4ED5MzgEMwGTZag= +github.com/bradleyfalzon/ghinstallation/v2 v2.11.0/go.mod h1:0LWKQwOHewXO/1acI6TtyE0Xc4ObDb2rFN7eHBAG71M= +github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= +github.com/bwmarrin/discordgo v0.19.0/go.mod h1:O9S4p+ofTFwB02em7jkpkV8M3R0/PUVOwN61zSZ0r4Q= +github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= -github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod 
h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudevents/sdk-go/v2 v2.1.0 h1:bmgrU8k+K2ppZ+G/q5xEQx/Xk9HRtJmkrEO3qtDO2k0= -github.com/cloudevents/sdk-go/v2 v2.1.0/go.mod h1:3CTrpB4+u7Iaj6fd7E2Xvm5IxMdRoaAhqaRVnOr2rCU= +github.com/cloudevents/sdk-go/v2 v2.15.2 h1:54+I5xQEnI73RBhWHxbI1XJcqOFOVJN85vb41+8mHUc= +github.com/cloudevents/sdk-go/v2 v2.15.2/go.mod h1:lL7kSWAE/V8VI4Wh0jbL2v/jvqsm6tjmaQBSvxcv4uE= +github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= +github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= +github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/cloudfoundry/jibber_jabber v0.0.0-20151120183258-bcc4c8345a21 h1:tuijfIjZyjZaHq9xDUh0tNitwXshJpbLkqMOJv4H3do= github.com/cloudfoundry/jibber_jabber v0.0.0-20151120183258-bcc4c8345a21/go.mod h1:po7NpZ/QiTKzBKyrsEAxwnTamCoh8uDk/egRpQ7siIc= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= +github.com/codeskyblue/go-sh v0.0.0-20190412065543-76bd3d59ff27/go.mod h1:VQx0hjo2oUeQkQUET7wRwradO6f+fN5jzXgB/zROxxE= github.com/colinmarc/hdfs v1.1.4-0.20180802165501-48eb8d6c34a9 h1:N98Et5DzDoJ1IO1cd8cZkXXT81W5+CR5S8rDU2I0HnM= github.com/colinmarc/hdfs v1.1.4-0.20180802165501-48eb8d6c34a9/go.mod h1:0DumPviB681UcSuJErAbDIOx6SIaJWj463TymfZG02I= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyphar/filepath-securejoin v0.2.3/go.mod 
h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= +github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/danieljoos/wincred v1.1.2 h1:QLdCxFs1/Yl4zduvBdcHB8goaYk9RARS2SgLLRuAyr0= +github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0= +github.com/danielm-codefresh/argo-multi-cluster v0.0.0-20220327145759-1c387c8ebc5f h1:EMOFoOz97OTxqo1B2i2dQ4LGg1aca8n1nd9U/+DMmxo= +github.com/danielm-codefresh/argo-multi-cluster v0.0.0-20220327145759-1c387c8ebc5f/go.mod h1:JQ0OpETZR6+qpI7pE/djefADPEPnQ4PMp7dDgE+iU6s= github.com/dave/jennifer v1.4.1/go.mod h1:7jEdnm+qBcxl8PC0zyp7vxcpSRnzXSt9r39tpTVGlwA= github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/deckarep/golang-set v1.7.1/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ= github.com/devigned/tab v0.1.1 h1:3mD6Kb1mUOYeLpJvTVSDwSg5ZsfSxfvxGRTxRsJsITA= github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dimfeld/httptreemux v5.0.1+incompatible h1:Qj3gVcDNoOthBAqftuD596rm4wg/adLLz5xh5CmpiCA= github.com/dimfeld/httptreemux v5.0.1+incompatible/go.mod h1:rbUlSV+CCpv/SuqUTP/8Bk2O3LyUV436/yaRGkhP6Z0= -github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/doublerebel/bellows v0.0.0-20160303004610-f177d92a03d3 h1:7nllYTGLnq4CqBL27lV6oNfXzM2tJ2mrKF8E+aBXOV0= +github.com/doublerebel/bellows v0.0.0-20160303004610-f177d92a03d3/go.mod h1:v/MTKot4he5oRHGirOYGN4/hEOONNnWtDBLAzllSGMw= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/eapache/go-resiliency v1.1.0/go.mod 
h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q= -github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/dvsekhvalnov/jose2go v1.6.0 h1:Y9gnSnP4qEI0+/uQkHvFXeD2PLPJeXEL+ySMEA2EjTY= +github.com/dvsekhvalnov/jose2go v1.6.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= +github.com/eapache/go-resiliency v1.6.0 h1:CqGDTLtpwuWKn6Nj3uNUdflaq+/kIPsg0gfNzHton30= +github.com/eapache/go-resiliency v1.6.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= +github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= +github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/eclipse/paho.mqtt.golang v1.2.0 h1:1F8mhG9+aO5/xpdtFkW4SxOJB67ukuDC3t2y2qayIX0= github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= -github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/eclipse/paho.mqtt.golang v1.4.3 h1:2kwcUGn8seMUfWndX0hGbvH8r7crgcJguQNCyp70xik= +github.com/eclipse/paho.mqtt.golang v1.4.3/go.mod h1:CSYvoAlsMkhYOXh/oKyxa8EcBci6dVkLCbo5tTC1RIE= +github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU= +github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.12.0+incompatible h1:SIvoTSbsMEwuM3dzFirLwKc4BH6VXP5CNf+G1FfJVr4= -github.com/emicklei/go-restful v2.12.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= -github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= +github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/emitter-io/go/v2 v2.0.9 h1:qA+cnG3kS2uLzo5ETFY6zbHBGl+FmNj0cGf3da7foA4= 
github.com/emitter-io/go/v2 v2.0.9/go.mod h1:St++epE1u/6ueCVw47xhu4shpkGNxKRVtkWv4Xi33mg= -github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= -github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/fasthttp/websocket v1.4.2 h1:AU/zSiIIAuJjBMf5o+vO0syGOnEfvZRu40xIhW/3RuM= -github.com/fasthttp/websocket v1.4.2/go.mod h1:smsv/h4PBEBaU0XDTY5UwJTpZv69fQ0FfcLJr21mA6Y= +github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.8.0 h1:lRj6N9Nci7MvzrXuX6HFzU8XjmhPiXPlsKEy1u0KQro= +github.com/evanphx/json-patch/v5 v5.8.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= +github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= -github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/structs v1.0.0 h1:BrX964Rv5uQ3wwS+KRUAJCBBw5PQmgJfJ6v4yly5QwU= -github.com/fatih/structs v1.0.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= +github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= +github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fortytw2/leaktest 
v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= -github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= -github.com/frankban/quicktest v1.7.2 h1:2QxQoC1TS09S7fhCPsrvqYdvP1H5M1P1ih5ABm3BTYk= -github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/gavv/httpexpect/v2 v2.2.0 h1:0VwaEBmQaNFHX9x591A8Up+8shCwdF/nF0qlRd/nI48= -github.com/gavv/httpexpect/v2 v2.2.0/go.mod h1:lnd0TqJLrP+wkJk3SFwtrpSlOAZQ7HaaIFuOYbgqgUM= -github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= -github.com/gdamore/tcell v1.3.0/go.mod h1:Hjvr+Ofd+gLglo7RYKxxnzCBmev3BzsS67MebKS4zMM= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/gavv/httpexpect/v2 v2.16.0 h1:Ty2favARiTYTOkCRZGX7ojXXjGyNAIohM1lZ3vqaEwI= +github.com/gavv/httpexpect/v2 v2.16.0/go.mod h1:uJLaO+hQ25ukBJtQi750PsztObHybNllN+t+MbbW8PY= +github.com/gfleury/go-bitbucket-v1 v0.0.0-20210707202713-7d616f7c18ac h1:XrZTje1i6Q8Y03z5AApzVTwn1rkPKYp5iyEEAhd0CV8= +github.com/gfleury/go-bitbucket-v1 v0.0.0-20210707202713-7d616f7c18ac/go.mod h1:LB3osS9X2JMYmTzcCArHHLrndBAfcVLQAvUddfs+ONs= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= github.com/gizak/termui/v3 v3.1.0/go.mod h1:bXQEBkJpzxUAKf0+xq9MSWAvWZlE7c+aidmyFlkYTrY= -github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0= -github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= -github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= -github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= -github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-billy/v5 v5.1.0 h1:4pl5BV4o7ZG/lterP4S6WzJ6xr49Ba5ET9ygheTYahk= -github.com/go-git/go-billy/v5 v5.1.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12 h1:PbKy9zOy4aAKrJ5pibIRpVO2BXnK1Tlcg+caKI7Ox5M= -github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12/go.mod 
h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw= -github.com/go-git/go-git/v5 v5.3.0 h1:8WKMtJR2j8RntEXR/uvTKagfEt4GYlwQ7mntE4+0GWc= -github.com/go-git/go-git/v5 v5.3.0/go.mod h1:xdX4bWJ48aOrdhnl2XqHYstHbbp6+LFS4r4X+lNVprw= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/gliderlabs/ssh v0.3.7 h1:iV3Bqi942d9huXnzEF2Mt+CY9gLu8DNM4Obd+8bODRE= +github.com/gliderlabs/ssh v0.3.7/go.mod h1:zpHEXBstFnQYtGnB8k8kQLol82umzn/2/snG7alWVD8= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= +github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= +github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= +github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= +github.com/go-git/go-git/v5 v5.12.0 h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZtys= +github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXYjuz9i5OEY= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/zapr v0.2.0 h1:v6Ji8yBW77pva6NkJKQdHLAJKrIJKRHz0RXwPqCHSR4= -github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU= -github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= -github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= -github.com/go-openapi/analysis v0.19.10 h1:5BHISBAXOc/aJK25irLZnx2D3s6WyYaY9D4gmuz9fdE= -github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ= -github.com/go-openapi/errors v0.17.0/go.mod 
h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/errors v0.19.6 h1:xZMThgv5SQ7SMbWtKFkCf9bBdvR2iEyw9k3zGZONuys= -github.com/go-openapi/errors v0.19.6/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/inflect v0.19.0 h1:9jCH9scKIbHeV9m12SmPilScz6krDxKRasNNSNPXu/4= -github.com/go-openapi/inflect v0.19.0/go.mod h1:lHpZVlpIQqLyKwJ4N+YSc9hchQy/i12fJykb83CRBH4= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU= +github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= +github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w= +github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE= +github.com/go-openapi/inflect v0.21.0 h1:FoBjBTQEcbg2cJUWX6uwL9OyIW8eqc9k4KhN4lfbeYk= +github.com/go-openapi/inflect v0.21.0/go.mod h1:INezMuUu7SJQc2AyR3WO0DqqYUJSj8Kb4hBd7WtjlAw= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= -github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.19.4/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= -github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= -github.com/go-openapi/jsonreference 
v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= -github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= -github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI= -github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= -github.com/go-openapi/loads v0.19.5 h1:jZVYWawIQiA1NBnHla28ktg6hrcfTHsCE+3QLVRBIls= -github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY= -github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= -github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= -github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= -github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo= -github.com/go-openapi/runtime v0.19.20 h1:J/t+QIjbcoq8WJvjGxRKiFBhqUE8slS9SbmD0Oi/raQ= -github.com/go-openapi/runtime v0.19.20/go.mod h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco= +github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs= +github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ= +github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= -github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= -github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= -github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= -github.com/go-openapi/spec v0.20.2 h1:pFPUZsiIbZ20kLUcuCGeuQWG735fPMxW7wHF9BWlnQU= -github.com/go-openapi/spec v0.20.2/go.mod h1:RW6Xcbs6LOyWLU/mXGdzn2Qc+3aj+ASfI7rvSZh1Vls= -github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= -github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= -github.com/go-openapi/strfmt v0.19.5 h1:0utjKrw+BAh8s57XE9Xz8DUBsVvPmRUB6styvl9wWIM= -github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= +github.com/go-openapi/spec v0.21.0 
h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= +github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= +github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= +github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= -github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= -github.com/go-openapi/swag v0.19.13 h1:233UVgMy1DlmCYYfOiFpta6e2urloh+sEs5id6lyzog= -github.com/go-openapi/swag v0.19.13/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= -github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= -github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= -github.com/go-openapi/validate v0.19.10 h1:tG3SZ5DC5KF4cyt7nqLVcQXGj5A7mpaYkAcNPlDK+Yk= -github.com/go-openapi/validate v0.19.10/go.mod h1:RKEZTUWDkxKQxN2jDT7ZnZi2bhZlbNMAuKvKB+IaGx8= -github.com/go-redis/redis v6.15.8+incompatible h1:BKZuG6mCnRj5AOaWJXoCgf6rqTYnYJLe4en2hxT7r9o= -github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= -github.com/go-resty/resty/v2 v2.3.0 h1:JOOeAvjSlapTT92p8xiS19Zxev1neGikoHsXJeOq8So= -github.com/go-resty/resty/v2 v2.3.0/go.mod h1:UpN9CgLZNsv4e9XG50UU8xdI0F43UQ4HmxLBDwaroHU= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gGcHOs= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58= +github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= +github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= +github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= +github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g= +github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-swagger/go-swagger v0.25.0 h1:FxhyrWWV8V/A9P6GtI5szWordAdbb6Y0nqdY/y9So2w= -github.com/go-swagger/go-swagger v0.25.0/go.mod h1:9639ioXrPX9E6BbnbaDklGXjNz7upAXoNBwL4Ok11Vk= -github.com/go-swagger/scan-repo-boundary v0.0.0-20180623220736-973b3573c013 
h1:l9rI6sNaZgNC0LnF3MiE+qTmyBA/tZAg1rtyrGbUMK0= -github.com/go-swagger/scan-repo-boundary v0.0.0-20180623220736-973b3573c013/go.mod h1:b65mBPzqzZWxOZGxSWrqs4GInLIn+u99Q9q7p+GKni0= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-swagger/go-swagger v0.31.0 h1:H8eOYQnY2u7vNKWDNykv2xJP3pBhRG/R+SOCAmKrLlc= +github.com/go-swagger/go-swagger v0.31.0/go.mod h1:WSigRRWEig8zV6t6Sm8Y+EmUjlzA/HoaZJ5edupq7po= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-telegram-bot-api/telegram-bot-api/v5 v5.5.1 h1:wG8n/XJQ07TmjbITcGiUaOtXxdrINDz1b0J1w0SzqDc= +github.com/go-telegram-bot-api/telegram-bot-api/v5 v5.5.1/go.mod h1:A2S0CWkNylc2phvKXWBBdD3K0iGnDBGbzRpISP2zBl8= github.com/go-test/deep v1.0.4 h1:u2CU3YKy9I2pmu9pX0eq50wCgjfGIt539SqR7FbHiho= github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= -github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= -github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= -github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= -github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/flect v0.2.0 h1:EWCvMGGxOjsgwlWaP+f4+Hh6yrrte7JeFL2S6b+0hdM= -github.com/gobuffalo/flect v0.2.0/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= -github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= -github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= -github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= -github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= -github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= -github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= -github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= -github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= -github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= -github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= -github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= 
-github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= +github.com/gobuffalo/flect v0.2.3 h1:f/ZukRnSNA/DUpSNDadko7Qc0PhGvsew35p/2tu+CRY= +github.com/gobuffalo/flect v0.2.3/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc= github.com/gobwas/glob v0.2.4-0.20181002190808-e7a84e9525fe h1:zn8tqiUbec4wR94o7Qj3LZCAT6uGobhEgnDRg6isG5U= github.com/gobwas/glob v0.2.4-0.20181002190808-e7a84e9525fe/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= +github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= +github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= +github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= +github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -474,544 +382,535 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-github/v31 v31.0.0 
h1:JJUxlP9lFK+ziXKimTCprajMApV1ecWD4NB6CCb0plo=
-github.com/google/go-github/v31 v31.0.0/go.mod h1:NQPZol8/1sMoWYGN2yaALIBytu17gAWfhbweiEed3pM=
-github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-github/v41 v41.0.0 h1:HseJrM2JFf2vfiZJ8anY2hqBjdfY1Vlj/K27ueww4gg=
+github.com/google/go-github/v41 v41.0.0/go.mod h1:XgmCA5H323A9rtgExdTcnDkcqp6S30AVACCBDOonIxg=
+github.com/google/go-github/v50 v50.2.0 h1:j2FyongEHlO9nxXLc+LP3wuBSVU9mVxfpdYUexMpIfk=
+github.com/google/go-github/v50 v50.2.0/go.mod h1:VBY8FB6yPIjrtKhozXv4FQupxKLS6H4m6xFZlT43q8Q=
+github.com/google/go-github/v62 v62.0.0 h1:/6mGCaRywZz9MuHyw9gD1CwsbmBX8GWsbFkwMmHdhl4=
+github.com/google/go-github/v62 v62.0.0/go.mod h1:EMxeUqGJq2xRu9DYBMwel/mr7kZrzUOfQmmpYrZn2a4=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
+github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
+github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
+github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
-github.com/googleapis/gnostic v0.5.1 h1:A8Yhf6EtqTv9RMsU6MQTyrtV1TjWlR6xU9BsZIwuTCM=
-github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
+github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg=
+github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI=
+github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
+github.com/gopackage/ddp v0.0.0-20170117053602-652027933df4 h1:4EZlYQIiyecYJlUbVkFXCXHz1QPhVXcHnQKAzBTPfQo=
+github.com/gopackage/ddp v0.0.0-20170117053602-652027933df4/go.mod h1:lEO7XoHJ/xNRBCxrn4h/CEB67h0kW1B0t4ooP2yrjUA=
+github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 h1:l5lAOZEym3oK3SQ2HBHWsJUfbNBiTXJDeW2QDxw9AQ0=
github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
-github.com/gorilla/handlers v1.4.2 h1:0QniY0USkHQ1RGCLfKxeNHK9bkDHGRYGNDFBCS+YARg=
-github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
-github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
-github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
-github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.0.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE=
+github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w=
+github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
+github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
+github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
+github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
+github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gregdel/pushover v1.1.0 h1:dwHyvrcpZCOS9V1fAnKPaGRRI5OC55cVaKhMybqNsKQ=
+github.com/gregdel/pushover v1.1.0/go.mod h1:EcaO66Nn1StkpEm1iKtBTV3d2A16SoMsVER1PthX7to=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.14.6 h1:8ERzHx8aj1Sc47mu9n/AksaKCSWrMchFtkdrS4BIj5o=
-github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw=
-github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
-github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
-github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU=
+github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0=
+github.com/hamba/avro v1.8.0 h1:eCVrLX7UYThA3R3yBZ+rpmafA5qTc3ZjpTz6gYJoVGU=
+github.com/hamba/avro v1.8.0/go.mod h1:NiGUcrLLT+CKfGu5REWQtD9OVPPYUGMVFiC+DE0lQfY=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
-github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
-github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI=
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
-github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
+github.com/hashicorp/go-hclog v1.1.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
+github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
+github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
+github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-msgpack v1.1.5 h1:9byZdVjKTe5mce63pRVNP1L7UAmdHOTEMGehn6KvJWs=
+github.com/hashicorp/go-msgpack v1.1.5/go.mod h1:gWVc3sv/wbDmR3rQsj1CAktEZzoz1YNK9NfGLXJ69/4=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/go-retryablehttp v0.5.1/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
-github.com/hashicorp/go-retryablehttp v0.6.4 h1:BbgctKO892xEyOXnGiaAwIoSq1QZ/SS4AhjoAh9DnfY=
-github.com/hashicorp/go-retryablehttp v0.6.4/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
-github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
-github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
-github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0=
+github.com/hashicorp/go-retryablehttp v0.7.2/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
+github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
+github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
-github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/raft v1.1.1 h1:HJr7UE1x/JrJSc9Oy6aDBHtNHUUBHjcQjTgvUVihoZs=
-github.com/hashicorp/raft v1.1.1/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8=
-github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk=
-github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
-github.com/hokaccha/go-prettyjson v0.0.0-20190818114111-108c894c2c0e h1:0aewS5NTyxftZHSnFaJmWE5oCCrj4DyEXkAiMa1iZJM=
-github.com/hokaccha/go-prettyjson v0.0.0-20190818114111-108c894c2c0e/go.mod h1:pFlLw2CfqZiIBOx6BuCeRLCrfxBJipTY0nIOF/VbGcI=
+github.com/hashicorp/raft v1.3.6 h1:v5xW5KzByoerQlN/o31VJrFNiozgzGyDoMgDJgXpsto=
+github.com/hashicorp/raft v1.3.6/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM=
+github.com/hokaccha/go-prettyjson v0.0.0-20211117102719-0474bc63780f h1:7LYC+Yfkj3CTRcShK0KOL/w6iTiKyqqBA9a41Wnggw8=
+github.com/hokaccha/go-prettyjson v0.0.0-20211117102719-0474bc63780f/go.mod h1:pFlLw2CfqZiIBOx6BuCeRLCrfxBJipTY0nIOF/VbGcI=
+github.com/howeyc/gopass v0.0.0-20170109162249-bf9dde6d0d2c/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/huandu/xstrings v1.3.1 h1:4jgBlKK6tLKFvO8u5pmYjG91cqytmDCDvGh7ECVFfFs=
-github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
-github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
+github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU=
+github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/iancoleman/strcase v0.1.1/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
-github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imkira/go-interpol v1.0.0 h1:HrmLyvOLJyjR0YofMw8QGdCIuYOs4TJUBDNU5sJC09E=
-github.com/imkira/go-interpol v1.0.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA=
-github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
+github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
+github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
+github.com/imkira/go-interpol v1.1.0 h1:KIiKr0VSG2CUW1hl1jpiyuzuJeKUUpC8iM1AIE7N1Vk=
+github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/itchyny/gojq v0.12.16 h1:yLfgLxhIr/6sJNVmYfQjTIv0jGctu6/DgDoivmxTr7g=
+github.com/itchyny/gojq v0.12.16/go.mod h1:6abHbdC2uB9ogMS38XsErnfqJ94UlngIJGlRAIj4jTM=
+github.com/itchyny/timefmt-go v0.1.6 h1:ia3s54iciXDdzWzwaVKXZPbiXzxxnv1SPGFfM/myJ5Q=
+github.com/itchyny/timefmt-go v0.1.6/go.mod h1:RRDZYC5s9ErkjQvTvvU7keJjxUYzIISJGxm9/mAERQg=
github.com/jawher/mow.cli v1.0.4/go.mod h1:5hQj2V8g+qYmLUVWqu4Wuja1pI57M83EChYLVZ0sMKk=
-github.com/jawher/mow.cli v1.1.0/go.mod h1:aNaQlc7ozF3vw6IJ2dHjp2ZFiA4ozMIYY6PyuRJwlUg=
+github.com/jawher/mow.cli v1.2.0/go.mod h1:y+pcA3jBAdo/GIZx/0rFjw/K2bVEODP9rfZOfaiq8Ko=
+github.com/jaytaylor/html2text v0.0.0-20190408195923-01ec452cbe43/go.mod h1:CVKlgaMiht+LXvHG173ujK6JUhZXKb2u/BQtjPDIvyk=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
-github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8=
-github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
-github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
+github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
+github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo=
+github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM=
+github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg=
+github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo=
+github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o=
+github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg=
+github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8=
+github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs=
+github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY=
+github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc=
github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
-github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
-github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
-github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
-github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
+github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
github.com/joncalhoun/qson v0.0.0-20200422171543-84433dcd3da0 h1:ct2XA1aDw8A07Dr8gtrrZgIgLKcZNAl2o9nn0WRMK4Y=
github.com/joncalhoun/qson v0.0.0-20200422171543-84433dcd3da0/go.mod h1:DFXrEwSRX0p/aSvxE21319menCBFeQO0jXpRj7LEZUA=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
-github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
-github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
-github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 h1:uC1QfSlInpQF+M0ao65imhwqKnz3Q2z/d8PWZRMQvDM=
github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
+github.com/k0kubun/pp v3.0.1+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg=
github.com/karrick/godirwalk v1.7.8/go.mod h1:2c9FRhkDxdIbgkOnCEvnSWs71Bhugbl46shStcFDJ34=
-github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
-github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
-github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 h1:DowS9hvgyYSX4TO5NpyC606/Z4SxnNYbT+WX27or6Ck=
-github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
+github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.10.8 h1:eLeJ3dr/Y9+XRfJT4l+8ZjmtB5RPJhucH2HeCV5+IZY=
-github.com/klauspost/compress v1.10.8/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
+github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.14.4/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg=
+github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
+github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4=
+github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc=
+github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/ktrysmt/go-bitbucket v0.9.80 h1:S+vZTXKx/VG5yCaX4I3Bmwo8lxWr4ifvuHdTboHTMMc=
+github.com/ktrysmt/go-bitbucket v0.9.80/go.mod h1:b8ogWEGxQMWoeFnT1ZE4aHIPGindI+9z/zAW/OVFjk0=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/labstack/echo v3.2.1+incompatible/go.mod h1:0INS7j/VjnFxD4E2wkz67b8cVwCLbBmJyDaka6Cmk1s=
github.com/labstack/gommon v0.2.7/go.mod h1:/tj9csK2iPSBvn+3NLM9e52usepMtrd5ilFYA+wQNJ4=
-github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU=
-github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
-github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
-github.com/lightstep/tracecontext.go v0.0.0-20181129014701-1757c391b1ac h1:+2b6iGRJe3hvV/yVXrd41yVEjxuFHxasJqDhkIjS4gk=
-github.com/lightstep/tracecontext.go v0.0.0-20181129014701-1757c391b1ac/go.mod h1:Frd2bnT3w5FB5q49ENTfVlztJES+1k/7lyWX2+9gq/M=
-github.com/lucasb-eyer/go-colorful v1.0.2/go.mod h1:0MS4r+7BZKSJ5mw4/S5MPN+qHFF1fYclkSPilDOKW0s=
-github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
-github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
-github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
-github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/linkedin/goavro/v2 v2.9.7/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA=
+github.com/linkedin/goavro/v2 v2.11.1 h1:4cuAtbDfqkKnBXp9E+tRkIJGa6W6iAjwonwt8O1f4U0=
+github.com/linkedin/goavro/v2 v2.11.1/go.mod h1:UgQUb2N/pmueQYH9bfqFioWxzYCZXSfF8Jw03O5sjqA=
+github.com/lusis/go-slackbot v0.0.0-20180109053408-401027ccfef5/go.mod h1:c2mYKRyMb1BPkO5St0c/ps62L4S0W2NAkaTXj9qEI+0=
+github.com/lusis/slack-test v0.0.0-20190426140909-c40012f20018/go.mod h1:sFlOUpQL1YcjhFVXhg1CG8ZASEs/Mf1oVb6H75JL/zg=
+github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
+github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
+github.com/mailgun/mailgun-go v2.0.0+incompatible/go.mod h1:NWTyU+O4aczg/nsGhQnvHL6v2n5Gy6Sv5tNDVvC6FbU=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
-github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
-github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
-github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
-github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE=
-github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
+github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
-github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
-github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
-github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
-github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
+github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
+github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw=
-github.com/minio/minio-go v1.0.1-0.20190523192347-c6c2912aa552 h1:czBFVgFWckvUt4DmJ9Jp40KA3qAawEaC2fi7WEF84K4=
-github.com/minio/minio-go v1.0.1-0.20190523192347-c6c2912aa552/go.mod h1:/haSOWG8hQNx2+JOfLJ9GKp61EAmgPwRVw/Sac0NzaM=
-github.com/minio/minio-go/v7 v7.0.2/go.mod h1:dJ80Mv2HeGkYLH1sqS/ksz07ON6csH3S6JUMSQ2zAns=
+github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
+github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
+github.com/minio/minio-go/v7 v7.0.29/go.mod h1:x81+AX5gHSfCSqw7jxRKHvxUXMlE5uKX0Vb75Xk5yYg=
+github.com/minio/minio-go/v7 v7.0.70 h1:1u9NtMgfK1U42kUxcsl5v0yj6TEOPR497OAQxpJnn2g=
+github.com/minio/minio-go/v7 v7.0.70/go.mod h1:4yBA8v80xGA30cfM3fz0DKYMXunWl/AV/6tWEs9ryzo=
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
-github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ=
github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
+github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
-github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
-github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
-github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
+github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
+github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4=
+github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
-github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
-github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE=
-github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
-github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
+github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
+github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
+github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ=
+github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs=
+github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/nats-io/gnatsd v1.4.1 h1:RconcfDeWpKCD6QIIwiVFcvForlXpWeJP7i5/lDLy44=
-github.com/nats-io/gnatsd v1.4.1/go.mod h1:nqco77VO78hLCJpIcVfygDP2rPGfsEHkGTUk94uh5DQ=
-github.com/nats-io/go-nats v1.7.2 h1:cJujlwCYR8iMz5ofZSD/p2WLW8FabhkQ2lIEVbSvNSA=
-github.com/nats-io/go-nats v1.7.2/go.mod h1:+t7RHT5ApZebkrQdnn6AhQJmhJJiKAvJUio1PiiCtj0=
-github.com/nats-io/graft v0.0.0-20200605173148-348798afea05 h1:wF/dApMICOCM+/c/1dpFxooYGwmSUvclQMT9CRjnEbM=
-github.com/nats-io/graft v0.0.0-20200605173148-348798afea05/go.mod h1:idnzXeCwCx69FMg+R0DyD4/OhrF1A+v3BqF5xSz+tS4=
-github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
-github.com/nats-io/jwt v0.3.2 h1:+RB5hMpXUUA2dfxuhBTEkMOrYmM+gKIZYS1KjSostMI=
-github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
-github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
-github.com/nats-io/nats-server/v2 v2.1.4/go.mod h1:Jw1Z28soD/QasIA2uWjXyM9El1jly3YwyFOuR8tH1rg=
-github.com/nats-io/nats-server/v2 v2.1.7 h1:jCoQwDvRYJy3OpOTHeYfvIPLP46BMeDmH7XEJg/r42I=
-github.com/nats-io/nats-server/v2 v2.1.7/go.mod h1:rbRrRE/Iv93O/rUvZ9dh4NfT0Cm9HWjW/BqOWLGgYiE=
-github.com/nats-io/nats-streaming-server v0.17.0 h1:eYhSmjRmRsCYNsoUshmZ+RgKbhq6B+7FvMHXo3M5yMs=
-github.com/nats-io/nats-streaming-server v0.17.0/go.mod h1:ewPBEsmp62Znl3dcRsYtlcfwudxHEdYMtYqUQSt4fE0=
-github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
-github.com/nats-io/nats.go v1.10.0 h1:L8qnKaofSfNFbXg0C5F71LdjPRnmQwSsA4ukmkt1TvY=
-github.com/nats-io/nats.go v1.10.0/go.mod h1:AjGArbfyR50+afOUotNX2Xs5SYHf+CoOa5HH1eEl2HE=
-github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
-github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
-github.com/nats-io/nkeys v0.1.4 h1:aEsHIssIk6ETN5m2/MD8Y4B2X7FfXrBAUdkyRvbVYzA=
-github.com/nats-io/nkeys v0.1.4/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s=
+github.com/nats-io/graft v0.0.0-20220215174245-93d18541496f h1:UE9EK14XcoK/PmGqPtVWlrdMoPzBwJyzTCWEJ+cW7DI=
+github.com/nats-io/graft v0.0.0-20220215174245-93d18541496f/go.mod h1:FDlTkeZBkKG5O+8RL3R0Q3gyhhHwG5sxcXcV7Lnx9x4=
+github.com/nats-io/jwt/v2 v2.2.1-0.20220113022732-58e87895b296/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k=
+github.com/nats-io/jwt/v2 v2.5.0 h1:WQQ40AAlqqfx+f6ku+i0pOVm+ASirD4fUh+oQsiE9Ak=
+github.com/nats-io/jwt/v2 v2.5.0/go.mod h1:24BeQtRwxRV8ruvC4CojXlx/WQ/VjuwlYiH+vu/+ibI=
+github.com/nats-io/nats-server/v2 v2.7.2/go.mod h1:tckmrt0M6bVaDT3kmh9UrIq/CBOBBse+TpXQi5ldaa8=
+github.com/nats-io/nats-server/v2 v2.7.4/go.mod h1:1vZ2Nijh8tcyNe8BDVyTviCd9NYzRbubQYiEHsvOQWc=
+github.com/nats-io/nats-server/v2 v2.9.23 h1:6Wj6H6QpP9FMlpCyWUaNu2yeZ/qGj+mdRkZ1wbikExU=
+github.com/nats-io/nats-server/v2 v2.9.23/go.mod h1:wEjrEy9vnqIGE4Pqz4/c75v9Pmaq7My2IgFmnykc4C0=
+github.com/nats-io/nats-streaming-server v0.24.3 h1:uZez8jBkXscua++jaDsK7DhpSAkizdetar6yWbPMRco=
+github.com/nats-io/nats-streaming-server v0.24.3/go.mod h1:rqWfyCbxlhKj//fAp8POdQzeADwqkVhZcoWlbhkuU5w=
+github.com/nats-io/nats.go v1.13.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
+github.com/nats-io/nats.go v1.13.1-0.20220121202836-972a071d373d/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
+github.com/nats-io/nats.go v1.13.1-0.20220308171302-2f2f6968e98d/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
+github.com/nats-io/nats.go v1.22.1/go.mod h1:tLqubohF7t4z3du1QDPYJIQQyhb4wl6DhjxEajSI7UA=
+github.com/nats-io/nats.go v1.35.0 h1:XFNqNM7v5B+MQMKqVGAyHwYhyKb48jrenXNxIU20ULk=
+github.com/nats-io/nats.go v1.35.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
+github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4=
+github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=
+github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
-github.com/nats-io/stan.go v0.6.0 h1:26IJPeykh88d8KVLT4jJCIxCyUBOC5/IQup8oWD/QYY=
-github.com/nats-io/stan.go v0.6.0/go.mod h1:eIcD5bi3pqbHT/xIIvXMwvzXYElgouBvaVRftaE+eac=
+github.com/nats-io/stan.go v0.10.2/go.mod h1:vo2ax8K2IxaR3JtEMLZRFKIdoK/3o1/PKueapB7ezX0=
+github.com/nats-io/stan.go v0.10.4 h1:19GS/eD1SeQJaVkeM9EkvEYattnvnWrZ3wkSWSw4uXw=
+github.com/nats-io/stan.go v0.10.4/go.mod h1:3XJXH8GagrGqajoO/9+HgPyKV5MWsv7S5ccdda+pc6k=
github.com/nicksnyder/go-i18n v1.10.1-0.20190510212457-b280125b035a h1:WsVgYECoTBctNmskVv/BZ8gh/TWP1xJf61PSW9HBdRY=
github.com/nicksnyder/go-i18n v1.10.1-0.20190510212457-b280125b035a/go.mod h1:e4Di5xjP9oTVrC6y3C7C0HoSYXjSbhh/dU0eUV32nB4=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nlopes/slack v0.5.0/go.mod h1:jVI4BBK3lSktibKahxBF74txcK2vyvkza1z/+rRnVAM=
github.com/nsf/termbox-go v0.0.0-20190121233118-02980233997d/go.mod h1:IuKpRQcYE1Tfu+oAQqaLisqDeXgjyyltCfsaoYN18NQ=
-github.com/nsqio/go-nsq v1.0.8 h1:3L2F8tNLlwXXlp2slDUrUWSBn2O3nMh8R1/KEDFTHPk=
-github.com/nsqio/go-nsq v1.0.8/go.mod h1:vKq36oyeVXgsS5Q8YEO7WghqidAVXQlcFxzQbQTuDEY=
-github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
+github.com/nsqio/go-nsq v1.1.0 h1:PQg+xxiUjA7V+TLdXw7nVrJ5Jbl3sN86EhGCQj4+FYE=
+github.com/nsqio/go-nsq v1.1.0/go.mod h1:vKq36oyeVXgsS5Q8YEO7WghqidAVXQlcFxzQbQTuDEY=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
-github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
-github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
+github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/oliveagle/jsonpath v0.0.0-20180606110733-2e52cf6e6852 h1:Yl0tPBa8QPjGmesFh1D0rDy+q1Twx6FyU7VWHi8wZbI=
github.com/oliveagle/jsonpath v0.0.0-20180606110733-2e52cf6e6852/go.mod h1:eqOVx5Vwu4gd2mmMZvVZsgIqNSaW3xxRThUJ0k/TPk4=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.2/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.14.1 h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4=
github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
+github.com/onsi/ginkgo/v2 v2.14.0 h1:vSmGj2Z5YPb9JwCWT6z6ihcUvDhuXLc3sJiqd3jMKAY=
+github.com/onsi/ginkgo/v2 v2.14.0/go.mod h1:JkUdW7JkN0V6rFvsHcJ478egV3XH9NxpD27Hal/PhZw=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs=
github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
-github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
-github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
-github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
-github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
-github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA=
-github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
-github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
-github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
-github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
-github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
+github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
+github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
+github.com/opsgenie/opsgenie-go-sdk-v2 v1.0.5 h1:AnS8ZCC5dle8P4X4FZ+IOlX9v0jAkCMiZDIzRnYwBbs=
+github.com/opsgenie/opsgenie-go-sdk-v2 v1.0.5/go.mod h1:f0ezb0R/mrB9Hpm5RrIS6EX3ydjsR2nAB88nYYXZcNY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
-github.com/pelletier/go-toml v1.8.0 h1:Keo9qb7iRJs2voHvunFtuuYFsbWeOBh8/P9v/kVMFtw=
-github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs=
-github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
+github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
+github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
+github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
-github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
-github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
-github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
-github.com/pierrec/lz4 v2.5.0+incompatible h1:MbdIZ43A//duwOjQqK3nP+up+65yraNFyX3Vp6Rwues=
-github.com/pierrec/lz4 v2.5.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM=
+github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
+github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=
+github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI=
+github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
+github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
+github.com/pkg/diff v0.0.0-20200914180035-5b29258ca4f7/go.mod h1:zO8QMzTeZd5cpnIkz/Gn6iK0jDfGicM1nynOkkPIl28=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
-github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
+github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo=
+github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk=
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
-github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
-github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
-github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
-github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
-github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_golang v1.9.0 h1:Rrch9mh17XcxvEu9D9DEpb4isxjGBtcevQjKvxPRQIU=
-github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU=
+github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
+github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
+github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
-github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/common v0.15.0 h1:4fgOnadei3EZvgRwxJ7RMpG1k1pOZth5Pc13tyspaKM=
-github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
+github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
-github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.2.0 h1:wH4vA7pcjKuZzjF7lM8awk4fnuJO6idemZXoKnULUx4=
-github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
+github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/rabbitmq/amqp091-go v1.10.0 h1:STpn5XsHlHGcecLmMFCtg7mqq0RnD+zFr4uzukfVhBw=
+github.com/rabbitmq/amqp091-go v1.10.0/go.mod h1:Hy4jKW5kQART1u+JkDTF9YYOQUHXqMuhrgxOEeS7G4o=
github.com/radovskyb/watcher v1.0.7 h1:AYePLih6dpmS32vlHfhCeli8127LzkIgwJGcwwe8tUE=
github.com/radovskyb/watcher v1.0.7/go.mod h1:78okwvY5wPdzcb1UYnip1pvrZNIVEIh/Cm+ZuvsUYIg=
-github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
-github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
-github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ=
-github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
-github.com/rivo/tview v0.0.0-20200219210816-cd38d7432498/go.mod h1:6lkG1x+13OShEf0EaOCaTQYyB7d5nSbb181KtjlS+84=
-github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
-github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ=
-github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
+github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/riferrei/srclient v0.5.4 h1:dfwyR5u23QF7beuVl2WemUY2KXh5+Sc4DHKyPXBNYuc=
+github.com/riferrei/srclient v0.5.4/go.mod h1:vbkLmWcgYa7JgfPvuy/+K8fTS0p1bApqadxrxi/S1MI=
+github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
+github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
+github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
+github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
-github.com/sanity-io/litter v1.2.0/go.mod h1:JF6pZUFgu2Q0sBZ+HSV35P8TVPI1TTzEwyu9FXAw2W4=
-github.com/savsgio/gotils v0.0.0-20200117113501-90175b0fbe3f h1:PgA+Olipyj258EIEYnpFFONrrCcAIWNUNoFhUfMqAGY=
-github.com/savsgio/gotils v0.0.0-20200117113501-90175b0fbe3f/go.mod h1:lHhJedqxCoHN+zMtwGNTXWmF0u9Jt363FYRhV6g0CdY=
-github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
+github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
+github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
+github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
+github.com/sanity-io/litter v1.5.5 h1:iE+sBxPBzoK6uaEP5Lt3fHNgpKcHXc/A2HGETy0uJQo=
+github.com/sanity-io/litter v1.5.5/go.mod h1:9gzJgR2i4ZpjZHsKvUXIRQVk7P+yM3e+jAF7bU2UI5U=
+github.com/santhosh-tekuri/jsonschema/v5 v5.0.0 h1:TToq11gyfNlrMFZiYujSekIsPd9AmsA2Bj/iv+s4JHE=
+github.com/santhosh-tekuri/jsonschema/v5 v5.0.0/go.mod h1:FKdcjfQW6rpZSnxxUvEA5H/cDPdvJ/SZJQLWWXWGrZ0=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
-github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
-github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
-github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=
+github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
+github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
-github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
+github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
+github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
-github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
-github.com/slack-go/slack v0.7.4 h1:Z+7CmUDV+ym4lYLA4NNLFIpr3+nDgViHrx8xsuXgrYs=
-github.com/slack-go/slack v0.7.4/go.mod h1:FGqNzJBmxIsZURAxh2a8D21AnOVvvXZvGligs4npPUM=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A=
+github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo=
+github.com/slack-go/slack v0.13.0 h1:7my/pR2ubZJ9912p9FtvALYpbt0cQPAqkRy2jaSI1PQ=
+github.com/slack-go/slack v0.13.0/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/assertions v0.0.0-20190401211740-f487f9de1cd3 h1:hBSHahWMEgzwRyS6dRpxY0XyjZsHyQ61s084wo5PJe0=
-github.com/smartystreets/assertions v0.0.0-20190401211740-f487f9de1cd3/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
-github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
+github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs=
+github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
+github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
-github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg=
+github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM=
+github.com/sony/sonyflake v1.0.0 h1:MpU6Ro7tfXwgn2l5eluf9xQvQJDROTBImNCfRXn/YeM=
+github.com/sony/sonyflake v1.0.0/go.mod h1:Jv3cfhf/UFtolOTTRd3q4Nl6ENqM+KfyZ5PseKfZGF4=
+github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
+github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
-github.com/spf13/afero v1.3.2 h1:GDarE4TJQI52kYSbSAmLiId1Elfj+xgSDqrUZxFhxlU=
-github.com/spf13/afero v1.3.2/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
-github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
+github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
+github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
+github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8=
-github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
-github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
-github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
-github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
+github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM=
+github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
+github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
-github.com/spf13/viper v1.7.0 h1:xVKxvI7ouOI5I+U9s2eeiUfMaWBVoXA3AWskkrqK0VM=
-github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
-github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
-github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
-github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
-github.com/streadway/amqp v1.0.0 h1:kuuDrUJFZL1QYL9hUNuCxNObNzB0bV/ZG5jV3RWAQgo=
-github.com/streadway/amqp v1.0.0/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
-github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
+github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=
+github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg=
+github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf/go.mod h1:RJID2RhlZKId02nZ62WenDCkgHFerpIOmW0iT7GKmXM=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stripe/stripe-go v70.15.0+incompatible h1:hNML7M1zx8RgtepEMlxyu/FpVPrP7KZm1gPFQquJQvM= github.com/stripe/stripe-go v70.15.0+incompatible/go.mod h1:A1dQZmO/QypXmsL0T8axYZkSN/uA/T/A64pfKdBAMiY= -github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tidwall/gjson v1.6.0/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= -github.com/tidwall/gjson v1.7.5 h1:zmAN/xmX7OtpAkv4Ovfso60r/BiCi5IErCDYGNJu+uc= -github.com/tidwall/gjson v1.7.5/go.mod h1:5/xDoumyyDNerp2U36lyolv46b3uF/9Bu6OfyQ9GImk= -github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= -github.com/tidwall/match v1.0.3 h1:FQUVvBImDutD8wJLN6c5eMzWtjgONK9MwIBCOrUJKeE= -github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tidwall/pretty v1.0.1/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tidwall/pretty v1.1.0 h1:K3hMW5epkdAVwibsQEfR/7Zj0Qgt4DxtNumTq/VloO8= -github.com/tidwall/pretty v1.1.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tidwall/sjson v1.1.1 h1:7h1vk049Jnd5EH9NyzNiEuwYW4b5qgreBbqRC19AS3U= -github.com/tidwall/sjson v1.1.1/go.mod h1:yvVuSnpEQv5cYIrO+AT6kw4QVfd5SDZoGIS7/5+fZFs= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/tailscale/depaware v0.0.0-20210622194025-720c4b409502/go.mod h1:p9lPsd+cx33L3H9nNoecRRxPssFKUwwI50I3pZ0yT+8= +github.com/tidwall/gjson v1.12.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.17.1 h1:wlYEnwqAHgzmhNUFfw7Xalt2JzQvsMx2Se4PcoFCT/U= +github.com/tidwall/gjson v1.17.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.4 h1:cuiLzLnaMeBhRmEv00Lpk3tkYrcxpmbU81tAY4Dw0tc= +github.com/tidwall/sjson v1.2.4/go.mod h1:098SZ494YoMWPmMO6ct4dcFnqxwj9r/gF0Etp19pSNM= github.com/toqueteos/webbrowser v1.2.0 h1:tVP/gpK69Fx+qMJKsLE7TD8LuGWPnEV71wBN9rrstGQ= github.com/toqueteos/webbrowser v1.2.0/go.mod h1:XWoZq4cyp9WeUeak7w7LXRUQf1F1ATJMir8RTqb4ayM= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= 
-github.com/valyala/fasthttp v1.9.0 h1:hNpmUdy/+ZXYpGy0OBfm7K0UQTzb73W0T0U4iJIVrMw= -github.com/valyala/fasthttp v1.9.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= +github.com/valyala/fasthttp v1.34.0 h1:d3AAQJ2DRcxJYHm7OXNXtXt2as1vMDfxeIcFvhmGGm4= +github.com/valyala/fasthttp v1.34.0/go.mod h1:epZA5N+7pY6ZaEKRmstzOuYJx9HI8DI1oaCGZpdH4h0= github.com/valyala/fasttemplate v0.0.0-20170224212429-dcecefd839c4/go.mod h1:50wTf68f99/Zt14pr046Tgt3Lp2vLyFZKzbFXTOabXw= -github.com/valyala/gozstd v1.7.0 h1:Ljh5c9zboqLhwTI33al32R72iCZfn0mCbVGcFWbGwRQ= -github.com/valyala/gozstd v1.7.0/go.mod h1:y5Ew47GLlP37EkTB+B4s7r6A5rdaeB7ftbl9zoYiIPQ= -github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= -github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= -github.com/xanzy/go-gitlab v0.33.0 h1:MUJZknbLhVXSFzBA5eqGGhQ2yHSu8tPbGBPeB3sN4B0= -github.com/xanzy/go-gitlab v0.33.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug= -github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI= -github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= -github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= -github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= -github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= +github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= +github.com/whilp/git-urls v0.0.0-20191001220047-6db9661140c0 h1:qqllXPzXh+So+mmANlX/gCJrgo+1kQyshMoQ+NASzm0= +github.com/whilp/git-urls v0.0.0-20191001220047-6db9661140c0/go.mod h1:2rx5KE5FLD0HRfkkpyn8JwbVLBdhgeiOb2D2D9LLKM4= +github.com/xanzy/go-gitlab v0.105.0 h1:3nyLq0ESez0crcaM19o5S//SvezOQguuIHZ3wgX64hM= +github.com/xanzy/go-gitlab v0.105.0/go.mod h1:ETg8tcj4OhrB84UEgeE8dSuV/0h4BBL1uOV/qK0vlyI= +github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= +github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= +github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY= +github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= +github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8= +github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v1.1.0 h1:ngVtJC9TY/lg0AA/1k48FYhBrhRoFlEmWzsehpNAaZg= -github.com/xeipuuv/gojsonschema 
v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yahoo/athenz v1.8.55 h1:xGhxN3yLq334APyn0Zvcc+aqu78Q7BBhYJevM3EtTW0= -github.com/yahoo/athenz v1.8.55/go.mod h1:G7LLFUH7Z/r4QAB7FfudfuA7Am/eCzO1GlzBhDL6Kv0= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0 h1:6fRhSjgLCkTD3JnJxvaJ4Sj+TYblw757bqYgZaOq5ZY= github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= github.com/yudai/gojsondiff v1.0.0 h1:27cbfqXLVEJ1o8I6v3y9lg8Ydm53EKqHXAOMxEGlCOA= @@ -1021,348 +920,299 @@ github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDf github.com/yudai/pp v2.0.1+incompatible h1:Q4//iY4pNF6yPLZIigmvcl7k/bPgrcTPIFIcmawg5bI= github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= -go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= -go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= -go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= -go.mongodb.org/mongo-driver v1.3.5 h1:S0ZOruh4YGHjD7JoN7mIsTrNjnQbOjrmgrx6l6pZN7I= -go.mongodb.org/mongo-driver v1.3.5/go.mod h1:Ual6Gkco7ZGQw8wE1t4tLnvBsf6yVSM60qW6TgOeJ5c= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M= +github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= +go.einride.tech/aip v0.67.1 h1:d/4TW92OxXBngkSOwWS2CH5rez869KpKMaN44mdxkFI= +go.einride.tech/aip v0.67.1/go.mod h1:ZGX4/zKw8dcgzdLsrvpOOGxfxI2QSk12SlP7d6c0/XI= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA= +go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= +go.mongodb.org/mongo-driver v1.14.0 
h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80= +go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM= -go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= +go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= +go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= +go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= +go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= +go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= +go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= +go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= +go.uber.org/atomic 
v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/ratelimit v0.3.1 h1:K4qVE+byfv/B3tC+4nYWP7v/6SimcO7HzHekoMNBma0= +go.uber.org/ratelimit v0.3.1/go.mod h1:6euWsTB6U/Nb3X++xEUXA8ciPJvr19Q/0h1+oDcJhRk= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190128193316-c7b33c32a30b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190422183909-d864b10871cd/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200206161412-a0c6ece9d31a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 h1:It14KIkyBFYkHkwZ7k45minvA9aorojkyjGk9KJ5B/w= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220307211146-efcb8507fb70/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200908183739-ae8ad444f925 
h1:5XVKs2rlCg8EFyRcvO8/XFwYxh1oKJO1Q3X5vttIf9c= golang.org/x/exp v0.0.0-20200908183739-ae8ad444f925/go.mod h1:1phAWC201xIgDyaFpmDeZkgf70Q4Pd/CNqfRtVPtxNw= +golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f h1:99ci1mjWVBWwJiEKYY6jWa4d2nTQVIEhZIptnrVb1XY= +golang.org/x/exp v0.0.0-20240416160154-fe59bbe5cc7f/go.mod h1:/lliqkxwWAhPjf5oSOIJup2XcqJaw8RGS6k3TGEc7GI= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449 h1:xUIPaMhvROX9dhPvRCenIJtU78+lbEenGbgqB5hfHCQ= golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20180921000356-2f5d2388922f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190607181551-461777fb6f67/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net 
v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210326060303-6b1517762897 h1:KrsHThm5nFk34YtATK1LsThyGhGbGe1olrte/HInHvs= -golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= +golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= +golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181019160139-8e24a49d80f8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190626150813-e07cf5db2756/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492 h1:Paq34FxTluEPvVyayQqMPgHm+vTOrIifmcYxFBx9TLg= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220307203707-22a9840ba4d7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220406155245-289d7a0edf71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= -golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools 
v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190213192042-740235f6c0d8/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190424220101-1e8e1cfdf96b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190808195139-e713427fea3f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools 
v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200616195046-dc31b401abb5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200717024301-6ddee64345a6/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ= +golang.org/x/tools v0.0.0-20201211185031-d93e913c1a58/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= 
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.1.0 h1:Phva6wqu+xR//Njw6iorylFFgn/z547tw5Ne3HZPQ+k= -gomodules.xyz/jsonpatch/v2 v2.1.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +gomodules.xyz/envconfig v1.3.1-0.20190308184047-426f31af0d45 h1:juzzlx91nWAOsHuOVfXZPMXHtJEKouZvY9bBbwlOeYs= +gomodules.xyz/envconfig v1.3.1-0.20190308184047-426f31af0d45/go.mod h1:41y72mzHT7+jFNgyBpJRrZWuZJcLmLrTpq6iGgOFJMQ= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +gomodules.xyz/notify v0.1.1 h1:1tTuoyswmPvzqPCTEDQK8SZ3ukCxLsonAAwst2+y1a0= +gomodules.xyz/notify v0.1.1/go.mod h1:QgQyU4xEA/plJcDeT66J2Go2V7U4c0pD9wjo7HfFil4= +gomodules.xyz/version v0.1.0/go.mod h1:Y8xuV02mL/45psyPKG3NCVOwvAOy6T5Kx0l3rCjKSjU= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.1 h1:5mMS6mYvK5LVB8+ujVBC33Y8gltBo/kT6HBm6kU80G4= -google.golang.org/api v0.15.1/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.181.0 h1:rPdjwnWgiPPOJx3IcSAQ2III5aX5tCer6wMpa/xmZi4= +google.golang.org/api v0.181.0/go.mod h1:MnQ+M0CFsfUwA5beZ+g/vCBCPXvtmZwRz2qzZk8ih1k= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= 
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda h1:wu/KJm9KJwpfHWhkkZGohVC6KRrc1oJNr4jwtQMOQXw= +google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda/go.mod h1:g2LLCvCeCSir/JJSWosk19BR4NVxGqHUC6rxIRsd7Aw= +google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae h1:AH34z6WAGVNkllnKs5raNq3yRq93VnjBG6rpfub/jYk= +google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae/go.mod h1:FfiGhwUm6CJviekPrc0oJ+7h29e+DmWU6UtjX0ZvI7Y= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8 h1:mxSlqyb8ZAHsYDCfiXN1EDdNTdvjUJSLY+OnAUtYNYA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8/go.mod h1:I7Y+G38R2bu5j1aLzfFmQfTcU/WnFuqDwLZAbvKTKpM= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.33.1/go.mod 
h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= +google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1372,117 +1222,108 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk= +gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df h1:n7WqCuqOuCbNr617RXOY0AWRXxgwEyPp2z+p0+hgMuE= +gopkg.in/gomail.v2 v2.0.0-20160411212932-81ebce5c23df/go.mod h1:LRQQ+SO6ZHR7tOkpBDuZnXENFzX8qRjMDMyPD6BRkCw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.41.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww= 
gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw= gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM= gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= gopkg.in/jcmturner/goidentity.v2 v2.0.0 h1:6Bmcdaxb0dD3HyHbo/MtJ2Q1wXLDuZJFwXZmuZvM+zw= gopkg.in/jcmturner/goidentity.v2 v2.0.0/go.mod h1:vCwK9HeXksMeUmQ4SxDd1tRz4LejrKh3KRVjQWhjvZI= -gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI= -gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= gopkg.in/jcmturner/gokrb5.v5 v5.3.0 h1:RS1MYApX27Hx1Xw7NECs7XxGxxrm69/4OmaRuX9kwec= gopkg.in/jcmturner/gokrb5.v5 v5.3.0/go.mod h1:oQz8Wc5GsctOTgCVyKad1Vw4TCWz5G6gfIQr88RPv4k= -gopkg.in/jcmturner/gokrb5.v7 v7.5.0 h1:a9tsXlIDD9SKxotJMK3niV7rPZAJeX2aD/0yg3qlIrg= -gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= gopkg.in/jcmturner/rpc.v0 v0.0.2 h1:wBTgrbL1qmLBUPsYVCqdJiI5aJgQhexmK+JkTHPUNJI= gopkg.in/jcmturner/rpc.v0 v0.0.2/go.mod h1:NzMq6cRzR9lipgw7WxRBHNx5N8SifBuaCQsOT1kWY/E= -gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= -gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.4.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.19.6 h1:F3lfwgpKcKms6F1mMqkQXFzXmme8QqHTJBtBkev3TOg= -k8s.io/api v0.19.6/go.mod h1:Plxx44Nh4zVblkJrIgxVPgPre1mvng6tXf1Sj3bs0fU= -k8s.io/apiextensions-apiserver v0.19.6 h1:LL7H65E2VTBfxmsWQZth60zzWVtbSN2gWMEWfsuDvIQ= -k8s.io/apiextensions-apiserver v0.19.6/go.mod h1:9s8ceL67UJAD1ewbsn07tkQ7/EGjiKOedKyiUCVXJgQ= -k8s.io/apimachinery v0.19.7-rc.0 h1:oQqSmtJvFkDIyj9BNVbdU2a6h2W/02wLKZfvIyQS/V4= -k8s.io/apimachinery v0.19.7-rc.0/go.mod h1:6sRbGRAVY5DOCuZwB5XkqguBqpqLU6q/kOaOdk29z6Q= -k8s.io/apiserver v0.19.6/go.mod h1:05XquZxCDzQ27ebk7uV2LrFIK4lm5Yt47XkkUvLAoAM= -k8s.io/client-go v0.19.6 h1:vtPb33nP8DBMW+/CyuJ8fiie36c3CM1Ts6L4Tsr+PtU= -k8s.io/client-go v0.19.6/go.mod h1:gEiS+efRlXYUEQ9Oz4lmNXlxAl5JZ8y2zbTDGhvXXnk= -k8s.io/code-generator v0.19.7-rc.0 h1:UHmlxBhlnDoYEppX4A1ecHeylTVyHfmnUIdrehH6gxA= -k8s.io/code-generator v0.19.7-rc.0/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= -k8s.io/component-base v0.19.6 h1:V76d3rIEWvP95peWgRycKslQnEwlaPy4UORvh3+YBbU= -k8s.io/component-base v0.19.6/go.mod h1:8Btsf8J00/fVDa/YFmXjei7gVkcFrlKZXjSeP4SZNJg= +k8s.io/api v0.17.8/go.mod h1:N++Llhs8kCixMUoCaXXAyMMPbo8dDVnh+IQ36xZV2/0= +k8s.io/api v0.29.2 h1:hBC7B9+MU+ptchxEqTNW2DkUosJpp1P+Wn6YncZ474A= +k8s.io/api v0.29.2/go.mod h1:sdIaaKuU7P44aoyyLlikSLayT6Vb7bvJNCX105xZXY0= +k8s.io/apiextensions-apiserver v0.29.0 h1:0VuspFG7Hj+SxyF/Z/2T0uFbI5gb5LRgEyUVE3Q4lV0= +k8s.io/apiextensions-apiserver v0.29.0/go.mod h1:TKmpy3bTS0mr9pylH0nOt/QzQRrW7/h7yLdRForMZwc= +k8s.io/apimachinery v0.17.8/go.mod h1:Lg8zZ5iC/O8UjCqW6DNhcQG2m4TdjF9kwG3891OWbbA= +k8s.io/apimachinery v0.29.2 h1:EWGpfJ856oj11C52NRCHuU7rFDwxev48z+6DSlGNsV8= +k8s.io/apimachinery v0.29.2/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= +k8s.io/client-go v0.17.8/go.mod h1:SJsDS64AAtt9VZyeaQMb4Ck5etCitZ/FwajWdzua5eY= +k8s.io/client-go v0.29.0 h1:KmlDtFcrdUzOYrBhXHgKw5ycWzc3ryPX5mQe0SkG3y8= +k8s.io/client-go v0.29.0/go.mod 
h1:yLkXH4HKMAywcrD82KMSmfYg2DlE8mepPR4JGSo5n38= +k8s.io/code-generator v0.29.2 h1:c9/iw2KnNpw2IRV+wwuG/Wns2TjPSgjWzbbjTevyiHI= +k8s.io/code-generator v0.29.2/go.mod h1:FwFi3C9jCrmbPjekhaCYcYG1n07CYiW1+PAPCockaos= +k8s.io/component-base v0.29.0 h1:T7rjd5wvLnPBV1vC4zWd/iWRbV8Mdxs+nGaoaFzGw3s= +k8s.io/component-base v0.29.0/go.mod h1:sADonFTQ9Zc9yFLghpDpmNXEdHyQmFIGbiuZbqAXQ1M= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14 h1:t4L10Qfx/p7ASH3gXCdIUtPbbIuegCoUJf3TMSFekjw= -k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201203183100-97869a43a9d9/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 h1:pWEwq4Asjm4vjW7vcsmijwBhOr1/shsbSYiWXmNGlks= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.5.0 h1:8mOnjf1RmUPW6KRqQCfYSZq/K20Unmp3IhuZUhxl8KI= k8s.io/klog/v2 v2.5.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ= -k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= -k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20200912215256-4140de9c8800 h1:9ZNvfPvVIEsp/T1ez4GQuzCcCTEQWhovSofhqR73A6g= -k8s.io/utils v0.0.0-20200912215256-4140de9c8800/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -moul.io/http2curl v1.0.1-0.20190925090545-5cd742060b0e h1:C7q+e9M5nggAvWfVg9Nl66kebKeuJlP3FD58V4RR5wo= -moul.io/http2curl v1.0.1-0.20190925090545-5cd742060b0e/go.mod h1:nejbQVfXh96n9dSF6cH3Jsk/QI1Z2oEL7sSI2ifXFNA= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0= -sigs.k8s.io/controller-runtime v0.7.0 h1:bU20IBBEPccWz5+zXpLnpVsgBYxqclaHu1pVDl/gEt8= -sigs.k8s.io/controller-runtime v0.7.0/go.mod h1:pJ3YBrJiAqMAZKi6UVGuE98ZrroV1p+pIhoHsMm9wdU= -sigs.k8s.io/controller-tools v0.4.1 h1:VkuV0MxlRPmRu5iTgBZU4UxUX2LiR99n3sdQGRxZF4w= -sigs.k8s.io/controller-tools v0.4.1/go.mod h1:G9rHdZMVlBDocIxGkK3jHLWqcTMNvveypYJwrvYKjWU= -sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA= -sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= +k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= +k8s.io/kube-openapi 
v0.0.0-20200410145947-bcb3869e6f29/go.mod h1:F+5wygcW0wmRTnM3cOgIqGivxkwSWIWT5YdsDbeAOaU= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +moul.io/http2curl/v2 v2.3.0 h1:9r3JfDzWPcbIklMOs2TnIFzDYvfAZvjeavG6EzP7jYs= +moul.io/http2curl/v2 v2.3.0/go.mod h1:RW4hyBjTWSYDOxapodpNEtX0g5Eb16sxklBqmd2RHcE= +nhooyr.io/websocket v1.8.11 h1:f/qXNc2/3DpoSZkHt1DQu6rj4zGC8JmkkLkWss0MgN0= +nhooyr.io/websocket v1.8.11/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/controller-runtime v0.17.2 h1:FwHwD1CTUemg0pW2otk7/U5/i5m2ymzvOXdbeGOUvw0= +sigs.k8s.io/controller-runtime v0.17.2/go.mod h1:+MngTvIQQQhfXtwfdGw/UOQ/aIaqsYywfCINOtwMO/s= +sigs.k8s.io/controller-tools v0.8.0 h1:uUkfTGEwrguqYYfcI2RRGUnC8mYdCFDqfwPKUcNJh1o= +sigs.k8s.io/controller-tools v0.8.0/go.mod h1:qE2DXhVOiEq5ijmINcFbqi9GZrrUjzB1TuJU0xa6eoY= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v2 v2.0.1/go.mod h1:Wb7vfKAodbKgf6tn1Kl0VvGj7mRH6DGaRcixXEJXTsE= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/hack/changelog.sh b/hack/changelog.sh new file mode 100755 index 0000000000..fe079bf88a --- /dev/null +++ b/hack/changelog.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env sh +set -eu + +echo '# Changelog' +echo + +tag= +# we skip v.0.9 tags, so these can be used on branches without updating release notes +git tag -l 'v*' | grep -v v.0.9 | sed 's/-rc/~/' | sort -rV | sed 's/~/-rc/' | while read last; do + if [ "$tag" != "" ]; then + echo "## $(git for-each-ref --format='%(refname:strip=2) (%(creatordate:short))' refs/tags/${tag})" + echo + git_log='git --no-pager log --no-merges --invert-grep --grep=^\(build\|chore\|ci\|docs\|test\):' + $git_log --format=' * [%h](https://github.com/argoproj/argo-events/commit/%H) %s' $last..$tag + echo + echo "### Contributors" + echo + $git_log --format=' * %an' $last..$tag | sort -u + echo + fi + tag=$last +done + diff --git a/hack/crdgen.sh b/hack/crdgen.sh index 984565accb..aebc4683a5 100755 --- a/hack/crdgen.sh +++ b/hack/crdgen.sh @@ -14,7 +14,9 @@ if [ "$(command -v controller-gen)" = "" ]; then fi header "Generating CRDs" 
-controller-gen crd:crdVersions=v1,trivialVersions=true,maxDescLen=0 paths=./pkg/apis/... output:dir=manifests/base/crds +controller-gen crd:crdVersions=v1,maxDescLen=262143 paths=./pkg/apis/... output:dir=manifests/base/crds + +mv manifests/base/crds/argoproj.io_eventbuses.yaml manifests/base/crds/argoproj.io_eventbus.yaml || true find manifests/base/crds -name 'argoproj.io*.yaml' | while read -r file; do echo "Patching ${file}" diff --git a/hack/crds.go b/hack/crds.go index 3d08b841d8..6cc2b057a9 100644 --- a/hack/crds.go +++ b/hack/crds.go @@ -1,7 +1,7 @@ package main import ( - "io/ioutil" + "os" "sigs.k8s.io/yaml" ) @@ -9,7 +9,7 @@ import ( type obj = map[string]interface{} func cleanCRD(filename string) { - data, err := ioutil.ReadFile(filename) + data, err := os.ReadFile(filename) if err != nil { panic(err) } @@ -20,10 +20,17 @@ func cleanCRD(filename string) { } delete(crd, "status") metadata := crd["metadata"].(obj) + if metadata["name"] == "eventbuses.argoproj.io" { + metadata["name"] = "eventbus.argoproj.io" + } delete(metadata, "annotations") delete(metadata, "creationTimestamp") spec := crd["spec"].(obj) delete(spec, "validation") + names := spec["names"].(obj) + if names["plural"] == "eventbuses" { + names["plural"] = "eventbus" + } versions := spec["versions"].([]interface{}) version := versions[0].(obj) properties := version["schema"].(obj)["openAPIV3Schema"].(obj)["properties"].(obj) @@ -36,7 +43,7 @@ func cleanCRD(filename string) { if err != nil { panic(err) } - err = ioutil.WriteFile(filename, data, 0666) + err = os.WriteFile(filename, data, 0666) if err != nil { panic(err) } diff --git a/hack/custom-boilerplate.go.txt b/hack/custom-boilerplate.go.txt index c88489e5ec..796ffaa908 100644 --- a/hack/custom-boilerplate.go.txt +++ b/hack/custom-boilerplate.go.txt @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/hack/gen-openapi-spec/main.go b/hack/gen-openapi-spec/main.go index 3494c93245..ca3d19b1c3 100644 --- a/hack/gen-openapi-spec/main.go +++ b/hack/gen-openapi-spec/main.go @@ -2,13 +2,12 @@ package main import ( "encoding/json" - "io/ioutil" "log" "os" "strings" - "github.com/go-openapi/spec" "k8s.io/kube-openapi/pkg/common" + "k8s.io/kube-openapi/pkg/validation/spec" cv1 "github.com/argoproj/argo-events/pkg/apis/common" ebv1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" @@ -16,11 +15,16 @@ import ( sv1 "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" ) +type ( + obj = map[string]interface{} +) + // Generate OpenAPI spec definitions for Workflow Resource func main() { if len(os.Args) <= 3 { log.Fatal("Supply a version") } + log.Println(os.Args) version := os.Args[1] kubeSwaggerPath := os.Args[2] output := os.Args[3] @@ -59,6 +63,9 @@ func main() { defs[d] = kd } } + for d, s := range k8sDefinitions { + defs[d] = s + } swagger := &spec.Swagger{ SwaggerProps: spec.SwaggerProps{ @@ -73,11 +80,60 @@ func main() { }, }, } + jsonBytes, err := json.MarshalIndent(swagger, "", " ") if err != nil { log.Fatal(err.Error()) } - err = ioutil.WriteFile(output, jsonBytes, 0644) + err = os.WriteFile(output, jsonBytes, 0644) + if err != nil { + panic(err) + } + f, err := os.Open(output) + if err != nil { + panic(err) + } + // filter out "default" fields from swagger definition properties because they are set to empty strings, which makes swagger validation fail.
+ swaggerObj := obj{} + err = json.NewDecoder(f).Decode(&swaggerObj) + if err != nil { + panic(err) + } + definitions := swaggerObj["definitions"].(obj) + + for _, d := range definitions { + props, ok := d.(obj)["properties"].(obj) + if ok { + for _, prop := range props { + prop := prop.(obj) + delete(prop, "default") + items, ok := prop["items"].(obj) + if ok { + delete(items, "default") + } + additionalProperties, ok := prop["additionalProperties"].(obj) + if ok { + delete(additionalProperties, "default") + } + } + } + props, ok = d.(obj)["additionalProperties"].(obj) + if ok { + delete(props, "default") + } + } + + f, err = os.Create(output) + if err != nil { + panic(err) + } + e := json.NewEncoder(f) + e.SetIndent("", " ") + err = e.Encode(swaggerObj) + if err != nil { + panic(err) + } + err = f.Close() if err != nil { panic(err) } @@ -101,7 +157,7 @@ func swaggify(name string) string { } func getKubernetesSwagger(kubeSwaggerPath string) spec.Definitions { - data, err := ioutil.ReadFile(kubeSwaggerPath) + data, err := os.ReadFile(kubeSwaggerPath) if err != nil { panic(err) } @@ -110,9 +166,5 @@ func getKubernetesSwagger(kubeSwaggerPath string) spec.Definitions { if err != nil { panic(err) } - err = spec.ExpandSpec(swagger, &spec.ExpandOptions{}) - if err != nil { - panic(err) - } return swagger.Definitions } diff --git a/hack/generate-proto.sh b/hack/generate-proto.sh index 2102543032..43d25825d6 100755 --- a/hack/generate-proto.sh +++ b/hack/generate-proto.sh @@ -7,6 +7,7 @@ set -o pipefail source $(dirname $0)/library.sh header "generating proto files" +ensure_protobuf ensure_vendor if [ "`command -v protoc-gen-gogo`" = "" ]; then @@ -27,7 +28,10 @@ make_fake_paths export GOPATH="${FAKE_GOPATH}" cd "${FAKE_REPOPATH}" -go install -mod=vendor ./vendor/k8s.io/code-generator/cmd/go-to-protobuf +# go < 1.17 +#go install -mod=vendor ./vendor/k8s.io/code-generator/cmd/go-to-protobuf +# go >= 1.17 +GOBIN=${GOPATH}/bin go install -mod=vendor ./vendor/k8s.io/code-generator/cmd/go-to-protobuf export GO111MODULE="off" @@ -42,4 +46,3 @@ ${GOPATH}/bin/go-to-protobuf \ --packages=github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1,github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1,github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1 \ --apimachinery-packages=github.com/argoproj/argo-events/pkg/apis/common,+k8s.io/apimachinery/pkg/util/intstr,+k8s.io/apimachinery/pkg/api/resource,k8s.io/apimachinery/pkg/runtime/schema,+k8s.io/apimachinery/pkg/runtime,k8s.io/apimachinery/pkg/apis/meta/v1,k8s.io/api/core/v1,k8s.io/api/policy/v1beta1 \ --proto-import ./vendor - diff --git a/hack/jsonschema/main.go b/hack/jsonschema/main.go new file mode 100644 index 0000000000..cf654d65ad --- /dev/null +++ b/hack/jsonschema/main.go @@ -0,0 +1,116 @@ +package main + +import ( + "encoding/json" + "fmt" + "os" + "strings" +) + +const ( + group = "argoproj.io" + version = "v1alpha1" + eventBusKind = "EventBus" + eventSourceKind = "EventSource" + sensorKind = "Sensor" +) + +type obj = map[string]interface{} + +func main() { + swagger := obj{} + { + f, err := os.Open("api/openapi-spec/swagger.json") + if err != nil { + panic(err) + } + err = json.NewDecoder(f).Decode(&swagger) + if err != nil { + panic(err) + } + } + { + crdKinds := []string{ + eventBusKind, + eventSourceKind, + sensorKind, + } + definitions := swagger["definitions"] + oneOf := make([]obj, 0, len(crdKinds)) + for _, kind := range crdKinds { + lowerCaseKind := strings.ToLower(kind) + definitionKey := fmt.Sprintf("io.argoproj.%s.%s.%s", 
lowerCaseKind, version, kind) + v := definitions.(obj)[definitionKey].(obj) + v["x-kubernetes-group-version-kind"] = []obj{ + { + "group": group, + "kind": kind, + "version": version, + }, + } + props := v["properties"].(obj) + props["apiVersion"].(obj)["const"] = fmt.Sprintf("%s/%s", group, version) + props["kind"].(obj)["const"] = kind + oneOf = append(oneOf, obj{"$ref": "#/definitions/" + definitionKey}) + } + + transformInt64OrStringDefinition(definitions.(obj)) + transformK8sIntOrStringDefinitions(definitions.(obj)) + + schema := obj{ + "$id": "http://events.argoproj.io/events.json", + "$schema": "http://json-schema.org/schema#", + "type": "object", + "oneOf": oneOf, + "definitions": definitions, + } + f, err := os.Create("api/jsonschema/schema.json") + if err != nil { + panic(err) + } + + e := json.NewEncoder(f) + e.SetIndent("", " ") + err = e.Encode(schema) + if err != nil { + panic(err) + } + + err = f.Close() + if err != nil { + panic(err) + } + } +} + +func transformInt64OrStringDefinition(definitions obj) { + int64OrStringDefinition := definitions["io.argoproj.common.Int64OrString"].(obj) + int64OrStringDefinition["type"] = []string{ + "integer", + "string", + } +} + +func transformK8sIntOrStringDefinitions(definitions obj) { + for _, d := range definitions { + transformK8sIntOrStringTypesInObject(d.(obj)) + } +} + +func transformK8sIntOrStringTypesInObject(object obj) { + props, ok := object["properties"].(obj) + if !ok { + format, ok := object["format"].(string) + if ok && format == "int-or-string" { + object["type"] = []string{ + "integer", + "string", + } + } + return + } + + for _, prop := range props { + transformK8sIntOrStringTypesInObject(prop.(obj)) + } +} diff --git a/hack/library.sh b/hack/library.sh index f37ffa3ce2..839e90f613 100644 --- a/hack/library.sh +++ b/hack/library.sh @@ -45,16 +45,16 @@ ensure_pandoc() { fi } -ensure_mockery() { - if [ "`command -v mockery`" = "" ]; then - warning "Please install mockery with - brew install vektra/tap/mockery" +ensure_protobuf() { + if [ "`command -v protoc`" = "" ]; then + warning "Please install protobuf with - brew install protobuf" exit 1 fi } -ensure_protobuf() { - if [ "`command -v protoc`" = "" ]; then - warning "Please install protobuf with - brew install protobuf" +ensure_mockery() { + if [ "`command -v mockery`" = "" ]; then + warning "Please install mockery with - brew install vektra/tap/mockery" exit 1 fi } diff --git a/hack/tools.go b/hack/tools.go index e6522f37f4..06452db4e3 100644 --- a/hack/tools.go +++ b/hack/tools.go @@ -1,3 +1,4 @@ +//go:build tools // +build tools // This package contains code generation utilities diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh index f62b3cc0d7..c6d4d34fe5 100755 --- a/hack/update-codegen.sh +++ b/hack/update-codegen.sh @@ -17,6 +17,8 @@ cd "${FAKE_REPOPATH}" CODEGEN_PKG=${CODEGEN_PKG:-$(cd "${FAKE_REPOPATH}"; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)} +chmod +x ${CODEGEN_PKG}/*.sh + subheader "running codegen for sensor" bash -x ${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \ github.com/argoproj/argo-events/pkg/client/sensor github.com/argoproj/argo-events/pkg/apis \ @@ -38,7 +40,7 @@ bash -x ${CODEGEN_PKG}/generate-groups.sh "deepcopy" \ bash -x ${CODEGEN_PKG}/generate-groups.sh "client,informer,lister" \ github.com/argoproj/argo-events/pkg/client/eventbus github.com/argoproj/argo-events/pkg/apis \ "eventbus:v1alpha1" \ - --plural-exceptions EventBus:EventBus \ + 
--plural-exceptions="EventBus:EventBus" \ --go-header-file hack/custom-boilerplate.go.txt subheader "running codegen for common" diff --git a/hack/update-swagger.sh b/hack/update-swagger.sh index 9eba998f27..1b523d2acb 100755 --- a/hack/update-swagger.sh +++ b/hack/update-swagger.sh @@ -20,11 +20,10 @@ if [ "`command -v swagger`" = "" ]; then go install -mod=vendor ./vendor/github.com/go-swagger/go-swagger/cmd/swagger fi -curl -Ls https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.19/api/openapi-spec/swagger.json -o ${k8s_swagger} +curl -Ls https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.29/api/openapi-spec/swagger.json -o ${k8s_swagger} go run ./hack/gen-openapi-spec/main.go ${VERSION} ${k8s_swagger} ${kubeified_swagger} swagger flatten --with-flatten minimal ${kubeified_swagger} -o ${output} swagger validate ${output} - diff --git a/manifests/base/controller-manager/controller-config.yaml b/manifests/base/controller-manager/controller-config.yaml new file mode 100644 index 0000000000..119648be15 --- /dev/null +++ b/manifests/base/controller-manager/controller-config.yaml @@ -0,0 +1,74 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: argo-events-controller-config +data: + controller-config.yaml: |+ + eventBus: + nats: + versions: + - version: 0.22.1 + natsStreamingImage: nats-streaming:0.22.1 + metricsExporterImage: natsio/prometheus-nats-exporter:0.8.0 + jetstream: + # Default JetStream settings, could be overridden by EventBus JetStream specs + settings: | + # https://docs.nats.io/running-a-nats-service/configuration#jetstream + # Only configure "max_memory_store" or "max_file_store", do not set "store_dir" as it has been hardcoded. + # e.g. 1G. -1 means no limit, up to 75% of available memory + max_memory_store: -1 + # e.g. 20G. 
-1 means no limit, up to 1TB if available + max_file_store: 1TB + streamConfig: | + # The default properties of the streams to be created in this JetStream service + maxMsgs: 50000 + maxAge: 168h + maxBytes: -1 + replicas: 3 + duplicates: 300s + versions: + - version: latest + natsImage: nats:2.10.10 + metricsExporterImage: natsio/prometheus-nats-exporter:0.14.0 + configReloaderImage: natsio/nats-server-config-reloader:0.14.0 + startCommand: /nats-server + - version: 2.8.1 + natsImage: nats:2.8.1 + metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1 + configReloaderImage: natsio/nats-server-config-reloader:0.7.0 + startCommand: /nats-server + - version: 2.8.1-alpine + natsImage: nats:2.8.1-alpine + metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1 + configReloaderImage: natsio/nats-server-config-reloader:0.7.0 + startCommand: nats-server + - version: 2.8.2 + natsImage: nats:2.8.2 + metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1 + configReloaderImage: natsio/nats-server-config-reloader:0.7.0 + startCommand: /nats-server + - version: 2.8.2-alpine + natsImage: nats:2.8.2-alpine + metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1 + configReloaderImage: natsio/nats-server-config-reloader:0.7.0 + startCommand: nats-server + - version: 2.9.1 + natsImage: nats:2.9.1 + metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1 + configReloaderImage: natsio/nats-server-config-reloader:0.7.0 + startCommand: /nats-server + - version: 2.9.12 + natsImage: nats:2.9.12 + metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1 + configReloaderImage: natsio/nats-server-config-reloader:0.7.0 + startCommand: /nats-server + - version: 2.9.16 + natsImage: nats:2.9.16 + metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1 + configReloaderImage: natsio/nats-server-config-reloader:0.7.0 + startCommand: /nats-server + - version: 2.10.10 + natsImage: nats:2.10.10 + metricsExporterImage: natsio/prometheus-nats-exporter:0.14.0 + configReloaderImage: natsio/nats-server-config-reloader:0.14.0 + startCommand: /nats-server diff --git a/manifests/base/eventbus-controller/eventbus-controller-deployment.yaml b/manifests/base/controller-manager/controller-manager-deployment.yaml similarity index 58% rename from manifests/base/eventbus-controller/eventbus-controller-deployment.yaml rename to manifests/base/controller-manager/controller-manager-deployment.yaml index 85bbb304f3..9ba1386615 100644 --- a/manifests/base/eventbus-controller/eventbus-controller-deployment.yaml +++ b/manifests/base/controller-manager/controller-manager-deployment.yaml @@ -1,36 +1,37 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: eventbus-controller + name: controller-manager spec: replicas: 1 selector: matchLabels: - app: eventbus-controller + app: controller-manager template: metadata: labels: - app: eventbus-controller + app: controller-manager spec: serviceAccountName: argo-events-sa securityContext: runAsNonRoot: true runAsUser: 9731 containers: - - name: eventbus-controller - image: quay.io/argoproj/argo-events:latest + - name: controller-manager + image: quay.io/codefresh/argo-events:v1.9.2-cap-CR-24607 imagePullPolicy: Always args: - - eventbus-controller + - controller env: - name: NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace - - name: NATS_STREAMING_IMAGE - value: nats-streaming:0.17.0 - - name: NATS_METRICS_EXPORTER_IMAGE - value: synadia/prometheus-nats-exporter:0.6.2 + - name: ARGO_EVENTS_IMAGE + value: quay.io/codefresh/argo-events:v1.9.2-cap-CR-24607 + 
volumeMounts: + - mountPath: /etc/argo-events + name: controller-config-volume livenessProbe: httpGet: path: /healthz @@ -43,5 +44,7 @@ spec: port: 8081 initialDelaySeconds: 3 periodSeconds: 3 - - + volumes: + - name: controller-config-volume + configMap: + name: argo-events-controller-config diff --git a/manifests/base/eventbus-controller/kustomization.yaml b/manifests/base/controller-manager/kustomization.yaml similarity index 55% rename from manifests/base/eventbus-controller/kustomization.yaml rename to manifests/base/controller-manager/kustomization.yaml index 26278a9d6e..689b92afa4 100644 --- a/manifests/base/eventbus-controller/kustomization.yaml +++ b/manifests/base/controller-manager/kustomization.yaml @@ -2,4 +2,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - - eventbus-controller-deployment.yaml +- controller-config.yaml +- controller-manager-deployment.yaml diff --git a/manifests/base/crds/kustomization.yaml b/manifests/base/crds/kustomization.yaml index 1871141ae5..99933b9db7 100644 --- a/manifests/base/crds/kustomization.yaml +++ b/manifests/base/crds/kustomization.yaml @@ -2,6 +2,6 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - - argoproj.io_eventbus.yaml - - argoproj.io_eventsources.yaml - - argoproj.io_sensors.yaml +- argoproj.io_eventbus.yaml +- argoproj.io_eventsources.yaml +- argoproj.io_sensors.yaml diff --git a/manifests/base/eventsource-controller/eventsource-controller-deployment.yaml b/manifests/base/eventsource-controller/eventsource-controller-deployment.yaml deleted file mode 100644 index f7a91e790b..0000000000 --- a/manifests/base/eventsource-controller/eventsource-controller-deployment.yaml +++ /dev/null @@ -1,43 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: eventsource-controller -spec: - replicas: 1 - selector: - matchLabels: - app: eventsource-controller - template: - metadata: - labels: - app: eventsource-controller - spec: - serviceAccountName: argo-events-sa - securityContext: - runAsNonRoot: true - runAsUser: 9731 - containers: - - name: eventsource-controller - image: quay.io/argoproj/argo-events:latest - imagePullPolicy: Always - args: - - eventsource-controller - env: - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: EVENTSOURCE_IMAGE - value: quay.io/argoproj/argo-events:latest - livenessProbe: - httpGet: - path: /healthz - port: 8081 - initialDelaySeconds: 3 - periodSeconds: 3 - readinessProbe: - httpGet: - path: /readyz - port: 8081 - initialDelaySeconds: 3 - periodSeconds: 3 diff --git a/manifests/base/eventsource-controller/kustomization.yaml b/manifests/base/eventsource-controller/kustomization.yaml deleted file mode 100644 index 2e4af20477..0000000000 --- a/manifests/base/eventsource-controller/kustomization.yaml +++ /dev/null @@ -1,5 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: - - eventsource-controller-deployment.yaml diff --git a/manifests/base/kustomization.yaml b/manifests/base/kustomization.yaml index bcea6e9015..9526096a94 100644 --- a/manifests/base/kustomization.yaml +++ b/manifests/base/kustomization.yaml @@ -8,39 +8,24 @@ kind: Kustomization resources: - crds - argo-events-sa.yaml -- eventsource-controller -- sensor-controller -- eventbus-controller +- controller-manager images: - name: quay.io/argoproj/argo-events newName: quay.io/codefresh/argo-events - newTag: v0.0.2-cf + newTag: v1.9.2-cap-CR-24607 -patchesStrategicMerge: -- |- - apiVersion: apps/v1 - kind: 
Deployment - metadata: - name: eventsource-controller - spec: - template: - spec: - containers: - - name: eventsource-controller - env: - - name: EVENTSOURCE_IMAGE - value: quay.io/codefresh/argo-events:v0.0.2-cf -- |- - apiVersion: apps/v1 - kind: Deployment - metadata: - name: sensor-controller - spec: - template: - spec: - containers: - - name: sensor-controller - env: - - name: SENSOR_IMAGE - value: quay.io/codefresh/argo-events:v0.0.2-cf +patches: +- patch: |- + apiVersion: apps/v1 + kind: Deployment + metadata: + name: controller-manager + spec: + template: + spec: + containers: + - name: controller-manager + env: + - name: ARGO_EVENTS_IMAGE + value: quay.io/codefresh/argo-events:v1.9.2-cap-CR-24607 diff --git a/manifests/base/sensor-controller/sensor-controller-deployment.yaml b/manifests/base/sensor-controller/sensor-controller-deployment.yaml deleted file mode 100644 index c80321413e..0000000000 --- a/manifests/base/sensor-controller/sensor-controller-deployment.yaml +++ /dev/null @@ -1,42 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: sensor-controller -spec: - replicas: 1 - selector: - matchLabels: - app: sensor-controller - template: - metadata: - labels: - app: sensor-controller - spec: - serviceAccountName: argo-events-sa - securityContext: - runAsNonRoot: true - runAsUser: 9731 - containers: - - name: sensor-controller - image: quay.io/argoproj/argo-events:latest - args: - - sensor-controller - env: - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: SENSOR_IMAGE - value: quay.io/argoproj/argo-events:latest - livenessProbe: - httpGet: - path: /healthz - port: 8081 - initialDelaySeconds: 3 - periodSeconds: 3 - readinessProbe: - httpGet: - path: /readyz - port: 8081 - initialDelaySeconds: 3 - periodSeconds: 3 diff --git a/manifests/cluster-install/kustomization.yaml b/manifests/cluster-install/kustomization.yaml index 2a085640c1..7a0117cb58 100644 --- a/manifests/cluster-install/kustomization.yaml +++ b/manifests/cluster-install/kustomization.yaml @@ -2,8 +2,7 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - - ../base - - rbac +- ../base +- rbac namespace: argo-events - diff --git a/manifests/cluster-install/rbac/argo-events-cluster-role.yaml b/manifests/cluster-install/rbac/argo-events-cluster-role.yaml index 0f75111f66..43ffaae496 100644 --- a/manifests/cluster-install/rbac/argo-events-cluster-role.yaml +++ b/manifests/cluster-install/rbac/argo-events-cluster-role.yaml @@ -3,6 +3,25 @@ kind: ClusterRole metadata: name: argo-events-role rules: + - apiGroups: + - "" + resources: + - "events" + verbs: + - "create" + - "patch" + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete - apiGroups: - argoproj.io verbs: @@ -30,7 +49,6 @@ rules: - pods - pods/exec - configmaps - - secrets - services - persistentvolumeclaims verbs: @@ -41,6 +59,18 @@ - update - patch - delete + # Secrets privileges are used to manage the NATS auth secrets.
This can be removed from the ClusterRole and granted per namespace as needed + - apiGroups: + - "" + resources: + - secrets + verbs: + - create + - get + - list + - update + - patch + - delete - apiGroups: - apps resources: diff --git a/manifests/cluster-install/rbac/kustomization.yaml b/manifests/cluster-install/rbac/kustomization.yaml index 522513e608..a632c6a5b7 100644 --- a/manifests/cluster-install/rbac/kustomization.yaml +++ b/manifests/cluster-install/rbac/kustomization.yaml @@ -2,8 +2,8 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - - argo-events-aggregate-to-admin.yaml - - argo-events-aggregate-to-edit.yaml - - argo-events-aggregate-to-view.yaml - - argo-events-cluster-role.yaml - - argo-events-binding.yaml +- argo-events-aggregate-to-admin.yaml +- argo-events-aggregate-to-edit.yaml +- argo-events-aggregate-to-view.yaml +- argo-events-cluster-role.yaml +- argo-events-binding.yaml diff --git a/manifests/extensions/validating-webhook/events-webhook-deployment.yaml b/manifests/extensions/validating-webhook/events-webhook-deployment.yaml index 532c72e667..b98914a33e 100644 --- a/manifests/extensions/validating-webhook/events-webhook-deployment.yaml +++ b/manifests/extensions/validating-webhook/events-webhook-deployment.yaml @@ -14,7 +14,7 @@ spec: spec: containers: - name: webhook - image: quay.io/argoproj/argo-events:latest + image: quay.io/codefresh/argo-events:v1.9.2-cap-CR-24607 imagePullPolicy: Always args: - webhook-service @@ -23,4 +23,6 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + - name: PORT + value: "443" serviceAccountName: argo-events-webhook-sa diff --git a/manifests/extensions/validating-webhook/kustomization.yaml b/manifests/extensions/validating-webhook/kustomization.yaml index d8e95df5d7..0e35fa60fd 100644 --- a/manifests/extensions/validating-webhook/kustomization.yaml +++ b/manifests/extensions/validating-webhook/kustomization.yaml @@ -12,4 +12,4 @@ namespace: argo-events images: - name: quay.io/argoproj/argo-events newName: quay.io/codefresh/argo-events - newTag: v0.0.2-cf + newTag: v1.9.2-cap-CR-24607 diff --git a/manifests/extensions/validating-webhook/rbac/argo-events-webhook-cluster-role.yaml b/manifests/extensions/validating-webhook/rbac/argo-events-webhook-cluster-role.yaml index 6409c18181..a0cec21556 100644 --- a/manifests/extensions/validating-webhook/rbac/argo-events-webhook-cluster-role.yaml +++ b/manifests/extensions/validating-webhook/rbac/argo-events-webhook-cluster-role.yaml @@ -52,3 +52,17 @@ rules: - eventbus - eventsources - sensors + - apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + verbs: + - get + - list + - apiGroups: + - apps + resources: + - deployments/finalizers + - clusterroles/finalizers + verbs: + - update diff --git a/manifests/install-validating-webhook.yaml b/manifests/install-validating-webhook.yaml index e2bb439e36..96c68fa7e2 100644 --- a/manifests/install-validating-webhook.yaml +++ b/manifests/install-validating-webhook.yaml @@ -58,6 +58,20 @@ rules: - get - list - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterroles + verbs: + - get + - list +- apiGroups: + - apps + resources: + - deployments/finalizers + - clusterroles/finalizers + verbs: + - update --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -107,7 +121,9 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace - image: quay.io/codefresh/argo-events:v0.0.2-cf + - name: PORT + value: "443" + image: 
quay.io/codefresh/argo-events:v1.9.2-cap-CR-24607 imagePullPolicy: Always name: webhook serviceAccountName: argo-events-webhook-sa diff --git a/manifests/install.yaml b/manifests/install.yaml index b31e940e2f..66717ea5d4 100644 --- a/manifests/install.yaml +++ b/manifests/install.yaml @@ -211,6 +211,25 @@ kind: ClusterRole metadata: name: argo-events-role rules: +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete - apiGroups: - argoproj.io resources: @@ -238,7 +257,6 @@ rules: - pods - pods/exec - configmaps - - secrets - services - persistentvolumeclaims verbs: @@ -249,6 +267,17 @@ rules: - update - patch - delete +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - get + - list + - update + - patch + - delete - apiGroups: - apps resources: @@ -276,79 +305,108 @@ subjects: name: argo-events-sa namespace: argo-events --- -apiVersion: apps/v1 -kind: Deployment +apiVersion: v1 +data: + controller-config.yaml: | + eventBus: + nats: + versions: + - version: 0.22.1 + natsStreamingImage: nats-streaming:0.22.1 + metricsExporterImage: natsio/prometheus-nats-exporter:0.8.0 + jetstream: + # Default JetStream settings, could be overridden by EventBus JetStream specs + settings: | + # https://docs.nats.io/running-a-nats-service/configuration#jetstream + # Only configure "max_memory_store" or "max_file_store", do not set "store_dir" as it has been hardcoded. + # e.g. 1G. -1 means no limit, up to 75% of available memory + max_memory_store: -1 + # e.g. 20G. -1 means no limit, up to 1TB if available + max_file_store: 1TB + streamConfig: | + # The default properties of the streams to be created in this JetStream service + maxMsgs: 50000 + maxAge: 168h + maxBytes: -1 + replicas: 3 + duplicates: 300s + versions: + - version: latest + natsImage: nats:2.10.10 + metricsExporterImage: natsio/prometheus-nats-exporter:0.14.0 + configReloaderImage: natsio/nats-server-config-reloader:0.14.0 + startCommand: /nats-server + - version: 2.8.1 + natsImage: nats:2.8.1 + metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1 + configReloaderImage: natsio/nats-server-config-reloader:0.7.0 + startCommand: /nats-server + - version: 2.8.1-alpine + natsImage: nats:2.8.1-alpine + metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1 + configReloaderImage: natsio/nats-server-config-reloader:0.7.0 + startCommand: nats-server + - version: 2.8.2 + natsImage: nats:2.8.2 + metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1 + configReloaderImage: natsio/nats-server-config-reloader:0.7.0 + startCommand: /nats-server + - version: 2.8.2-alpine + natsImage: nats:2.8.2-alpine + metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1 + configReloaderImage: natsio/nats-server-config-reloader:0.7.0 + startCommand: nats-server + - version: 2.9.1 + natsImage: nats:2.9.1 + metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1 + configReloaderImage: natsio/nats-server-config-reloader:0.7.0 + startCommand: /nats-server + - version: 2.9.12 + natsImage: nats:2.9.12 + metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1 + configReloaderImage: natsio/nats-server-config-reloader:0.7.0 + startCommand: /nats-server + - version: 2.9.16 + natsImage: nats:2.9.16 + metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1 + configReloaderImage: natsio/nats-server-config-reloader:0.7.0 + startCommand: /nats-server + - version: 2.10.10 + 
natsImage: nats:2.10.10 + metricsExporterImage: natsio/prometheus-nats-exporter:0.14.0 + configReloaderImage: natsio/nats-server-config-reloader:0.14.0 + startCommand: /nats-server +kind: ConfigMap metadata: - name: eventbus-controller + name: argo-events-controller-config namespace: argo-events -spec: - replicas: 1 - selector: - matchLabels: - app: eventbus-controller - template: - metadata: - labels: - app: eventbus-controller - spec: - containers: - - args: - - eventbus-controller - env: - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: NATS_STREAMING_IMAGE - value: nats-streaming:0.17.0 - - name: NATS_METRICS_EXPORTER_IMAGE - value: synadia/prometheus-nats-exporter:0.6.2 - image: quay.io/codefresh/argo-events:v0.0.2-cf - imagePullPolicy: Always - livenessProbe: - httpGet: - path: /healthz - port: 8081 - initialDelaySeconds: 3 - periodSeconds: 3 - name: eventbus-controller - readinessProbe: - httpGet: - path: /readyz - port: 8081 - initialDelaySeconds: 3 - periodSeconds: 3 - securityContext: - runAsNonRoot: true - runAsUser: 9731 - serviceAccountName: argo-events-sa --- apiVersion: apps/v1 kind: Deployment metadata: - name: eventsource-controller + name: controller-manager namespace: argo-events spec: replicas: 1 selector: matchLabels: - app: eventsource-controller + app: controller-manager template: metadata: labels: - app: eventsource-controller + app: controller-manager spec: containers: - args: - - eventsource-controller + - controller env: - - name: EVENTSOURCE_IMAGE - value: quay.io/codefresh/argo-events:v0.0.2-cf + - name: ARGO_EVENTS_IMAGE + value: quay.io/codefresh/argo-events:v1.9.2-cap-CR-24607 - name: NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace - image: quay.io/codefresh/argo-events:v0.0.2-cf + image: quay.io/codefresh/argo-events:v1.9.2-cap-CR-24607 imagePullPolicy: Always livenessProbe: httpGet: @@ -356,58 +414,21 @@ spec: port: 8081 initialDelaySeconds: 3 periodSeconds: 3 - name: eventsource-controller - readinessProbe: - httpGet: - path: /readyz - port: 8081 - initialDelaySeconds: 3 - periodSeconds: 3 - securityContext: - runAsNonRoot: true - runAsUser: 9731 - serviceAccountName: argo-events-sa ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: sensor-controller - namespace: argo-events -spec: - replicas: 1 - selector: - matchLabels: - app: sensor-controller - template: - metadata: - labels: - app: sensor-controller - spec: - containers: - - args: - - sensor-controller - env: - - name: SENSOR_IMAGE - value: quay.io/codefresh/argo-events:v0.0.2-cf - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - image: quay.io/codefresh/argo-events:v0.0.2-cf - livenessProbe: - httpGet: - path: /healthz - port: 8081 - initialDelaySeconds: 3 - periodSeconds: 3 - name: sensor-controller + name: controller-manager readinessProbe: httpGet: path: /readyz port: 8081 initialDelaySeconds: 3 periodSeconds: 3 + volumeMounts: + - mountPath: /etc/argo-events + name: controller-config-volume securityContext: runAsNonRoot: true runAsUser: 9731 serviceAccountName: argo-events-sa + volumes: + - configMap: + name: argo-events-controller-config + name: controller-config-volume diff --git a/manifests/namespace-install.yaml b/manifests/namespace-install.yaml index 6279648bd2..a311831a01 100644 --- a/manifests/namespace-install.yaml +++ b/manifests/namespace-install.yaml @@ -130,6 +130,25 @@ metadata: name: argo-events-role namespace: argo-events rules: +- apiGroups: + - "" + resources: + - events + verbs: + - 
create + - patch +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete - apiGroups: - argoproj.io resources: @@ -157,7 +176,6 @@ rules: - pods - pods/exec - configmaps - - secrets - services - persistentvolumeclaims verbs: @@ -168,6 +186,17 @@ rules: - update - patch - delete +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - get + - update + - list + - patch + - delete - apiGroups: - apps resources: @@ -196,81 +225,109 @@ subjects: name: argo-events-sa namespace: argo-events --- -apiVersion: apps/v1 -kind: Deployment +apiVersion: v1 +data: + controller-config.yaml: | + eventBus: + nats: + versions: + - version: 0.22.1 + natsStreamingImage: nats-streaming:0.22.1 + metricsExporterImage: natsio/prometheus-nats-exporter:0.8.0 + jetstream: + # Default JetStream settings, could be overridden by EventBus JetStream specs + settings: | + # https://docs.nats.io/running-a-nats-service/configuration#jetstream + # Only configure "max_memory_store" or "max_file_store", do not set "store_dir" as it has been hardcoded. + # e.g. 1G. -1 means no limit, up to 75% of available memory + max_memory_store: -1 + # e.g. 20G. -1 means no limit, Up to 1TB if available + max_file_store: 1TB + streamConfig: | + # The default properties of the streams to be created in this JetStream service + maxMsgs: 50000 + maxAge: 168h + maxBytes: -1 + replicas: 3 + duplicates: 300s + versions: + - version: latest + natsImage: nats:2.10.10 + metricsExporterImage: natsio/prometheus-nats-exporter:0.14.0 + configReloaderImage: natsio/nats-server-config-reloader:0.14.0 + startCommand: /nats-server + - version: 2.8.1 + natsImage: nats:2.8.1 + metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1 + configReloaderImage: natsio/nats-server-config-reloader:0.7.0 + startCommand: /nats-server + - version: 2.8.1-alpine + natsImage: nats:2.8.1-alpine + metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1 + configReloaderImage: natsio/nats-server-config-reloader:0.7.0 + startCommand: nats-server + - version: 2.8.2 + natsImage: nats:2.8.2 + metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1 + configReloaderImage: natsio/nats-server-config-reloader:0.7.0 + startCommand: /nats-server + - version: 2.8.2-alpine + natsImage: nats:2.8.2-alpine + metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1 + configReloaderImage: natsio/nats-server-config-reloader:0.7.0 + startCommand: nats-server + - version: 2.9.1 + natsImage: nats:2.9.1 + metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1 + configReloaderImage: natsio/nats-server-config-reloader:0.7.0 + startCommand: /nats-server + - version: 2.9.12 + natsImage: nats:2.9.12 + metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1 + configReloaderImage: natsio/nats-server-config-reloader:0.7.0 + startCommand: /nats-server + - version: 2.9.16 + natsImage: nats:2.9.16 + metricsExporterImage: natsio/prometheus-nats-exporter:0.9.1 + configReloaderImage: natsio/nats-server-config-reloader:0.7.0 + startCommand: /nats-server + - version: 2.10.10 + natsImage: nats:2.10.10 + metricsExporterImage: natsio/prometheus-nats-exporter:0.14.0 + configReloaderImage: natsio/nats-server-config-reloader:0.14.0 + startCommand: /nats-server +kind: ConfigMap metadata: - name: eventbus-controller + name: argo-events-controller-config namespace: argo-events -spec: - replicas: 1 - selector: - matchLabels: - app: eventbus-controller - template: - metadata: - labels: - app: 
eventbus-controller - spec: - containers: - - args: - - eventbus-controller - - --namespaced - env: - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: NATS_STREAMING_IMAGE - value: nats-streaming:0.17.0 - - name: NATS_METRICS_EXPORTER_IMAGE - value: synadia/prometheus-nats-exporter:0.6.2 - image: quay.io/codefresh/argo-events:v0.0.2-cf - imagePullPolicy: Always - livenessProbe: - httpGet: - path: /healthz - port: 8081 - initialDelaySeconds: 3 - periodSeconds: 3 - name: eventbus-controller - readinessProbe: - httpGet: - path: /readyz - port: 8081 - initialDelaySeconds: 3 - periodSeconds: 3 - securityContext: - runAsNonRoot: true - runAsUser: 9731 - serviceAccountName: argo-events-sa --- apiVersion: apps/v1 kind: Deployment metadata: - name: eventsource-controller + name: controller-manager namespace: argo-events spec: replicas: 1 selector: matchLabels: - app: eventsource-controller + app: controller-manager template: metadata: labels: - app: eventsource-controller + app: controller-manager spec: containers: - args: - - eventsource-controller + - controller - --namespaced env: - - name: EVENTSOURCE_IMAGE - value: quay.io/codefresh/argo-events:v0.0.2-cf + - name: ARGO_EVENTS_IMAGE + value: quay.io/codefresh/argo-events:v1.9.2-cap-CR-24607 - name: NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace - image: quay.io/codefresh/argo-events:v0.0.2-cf + image: quay.io/codefresh/argo-events:v1.9.2-cap-CR-24607 imagePullPolicy: Always livenessProbe: httpGet: @@ -278,59 +335,21 @@ spec: port: 8081 initialDelaySeconds: 3 periodSeconds: 3 - name: eventsource-controller - readinessProbe: - httpGet: - path: /readyz - port: 8081 - initialDelaySeconds: 3 - periodSeconds: 3 - securityContext: - runAsNonRoot: true - runAsUser: 9731 - serviceAccountName: argo-events-sa ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: sensor-controller - namespace: argo-events -spec: - replicas: 1 - selector: - matchLabels: - app: sensor-controller - template: - metadata: - labels: - app: sensor-controller - spec: - containers: - - args: - - sensor-controller - - --namespaced - env: - - name: SENSOR_IMAGE - value: quay.io/codefresh/argo-events:v0.0.2-cf - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - image: quay.io/codefresh/argo-events:v0.0.2-cf - livenessProbe: - httpGet: - path: /healthz - port: 8081 - initialDelaySeconds: 3 - periodSeconds: 3 - name: sensor-controller + name: controller-manager readinessProbe: httpGet: path: /readyz port: 8081 initialDelaySeconds: 3 periodSeconds: 3 + volumeMounts: + - mountPath: /etc/argo-events + name: controller-config-volume securityContext: runAsNonRoot: true runAsUser: 9731 serviceAccountName: argo-events-sa + volumes: + - configMap: + name: argo-events-controller-config + name: controller-config-volume diff --git a/manifests/namespace-install/kustomization.yaml b/manifests/namespace-install/kustomization.yaml index 6cc2becf36..31d20da0c6 100644 --- a/manifests/namespace-install/kustomization.yaml +++ b/manifests/namespace-install/kustomization.yaml @@ -2,15 +2,15 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - - ../base - - rbac +- ../base +- rbac namespace: argo-events patches: - - patch: |- - - op: add - path: /spec/template/spec/containers/0/args/- - value: --namespaced - target: - kind: Deployment +- patch: |- + - op: add + path: /spec/template/spec/containers/0/args/- + value: --namespaced + target: + kind: Deployment diff --git 
a/manifests/namespace-install/rbac/argo-events-role.yaml b/manifests/namespace-install/rbac/argo-events-role.yaml index aa79885438..cd7ce793a6 100644 --- a/manifests/namespace-install/rbac/argo-events-role.yaml +++ b/manifests/namespace-install/rbac/argo-events-role.yaml @@ -3,6 +3,25 @@ kind: Role metadata: name: argo-events-role rules: + - apiGroups: + - "" + resources: + - "events" + verbs: + - "create" + - "patch" + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete - apiGroups: - argoproj.io verbs: @@ -30,7 +49,6 @@ rules: - pods - pods/exec - configmaps - - secrets - services - persistentvolumeclaims verbs: @@ -41,6 +59,18 @@ rules: - update - patch - delete + # Secrets privileges are used to manage the NATS auth secrets. This can be removed from the ClusterRole and granted granularly per Namespace as needed + - apiGroups: + - "" + resources: + - secrets + verbs: + - create + - get + - update + - list + - patch + - delete - apiGroups: - apps resources: diff --git a/manifests/namespace-install/rbac/kustomization.yaml b/manifests/namespace-install/rbac/kustomization.yaml index 887c4ec6b9..66abddb74e 100644 --- a/manifests/namespace-install/rbac/kustomization.yaml +++ b/manifests/namespace-install/rbac/kustomization.yaml @@ -2,5 +2,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - - argo-events-role.yaml - - argo-events-role-binding.yaml +- argo-events-role.yaml +- argo-events-role-binding.yaml diff --git a/metrics/metrics.go b/metrics/metrics.go index edb2a96f9d..84dc74c86c 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -3,11 +3,15 @@ package metrics import ( "context" "net/http" + "runtime" + + "github.com/prometheus/client_golang/prometheus/collectors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "go.uber.org/zap" + argoevents "github.com/argoproj/argo-events" "github.com/argoproj/argo-events/common/logging" ) @@ -21,6 +25,16 @@ const ( labelTriggerName = "trigger_name" ) +var ( + buildInfo = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "build_info", + Help: "A metric with a constant '1' value labeled by version from which Argo-Events was built.", + }, + []string{"version", "goversion", "goarch", "commit"}, + ) +) + // Metrics represents EventSource metrics information type Metrics struct { namespace string @@ -167,10 +181,20 @@ func (m *Metrics) ActionDuration(sensorName, triggerName string, num float64) { func (m *Metrics) Run(ctx context.Context, addr string) { log := logging.FromContext(ctx) metricsRegistry := prometheus.NewRegistry() - metricsRegistry.MustRegister(m) + metricsRegistry.MustRegister(collectors.NewGoCollector(), m) + metricsRegistry.MustRegister(buildInfo) + recordBuildInfo() + http.Handle("/metrics", promhttp.HandlerFor(metricsRegistry, promhttp.HandlerOpts{})) + log.Info("starting metrics server") if err := http.ListenAndServe(addr, nil); err != nil { log.Fatalw("failed to start metrics server", zap.Error(err)) } } +
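The build_info gauge registered above follows the standard Prometheus "info metric" pattern: a gauge pinned to a constant 1 whose labels carry the interesting values (version, Go version, architecture, commit). A minimal, self-contained sketch of that pattern follows; the listen address, version string, and commit hash are placeholders for illustration, not the project's actual wiring.

package main

import (
	"log"
	"net/http"
	"runtime"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// buildInfo is a constant-1 gauge; the build metadata lives in its labels.
var buildInfo = prometheus.NewGaugeVec(
	prometheus.GaugeOpts{
		Name: "build_info",
		Help: "A metric with a constant '1' value labeled by build metadata.",
	},
	[]string{"version", "goversion", "goarch", "commit"},
)

func main() {
	// Use a dedicated registry, as the patch above does, so only explicitly
	// registered collectors are exposed.
	reg := prometheus.NewRegistry()
	reg.MustRegister(buildInfo)

	// Record the single series once at startup ("v1.9.2" and "0000000" are
	// placeholder values, not real build metadata).
	buildInfo.WithLabelValues("v1.9.2", runtime.Version(), runtime.GOARCH, "0000000").Set(1)

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}

+// recordBuildInfo publishes information about Argo-Events version and runtime info through an info metric (gauge).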
+func recordBuildInfo() { + vers := argoevents.GetVersion() + buildInfo.WithLabelValues(vers.Version, runtime.Version(), runtime.GOARCH, vers.GitCommit).Set(1) +} diff --git a/mkdocs.yml b/mkdocs.yml index 422b5d6e19..ab71423a2a 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -4,92 +4,131 @@ strict: true theme: name: material palette: - primary: teal + - scheme: default + primary: teal + toggle: + icon: material/toggle-switch-off-outline + name: Switch to dark mode + - scheme: slate + toggle: + icon: material/toggle-switch + name: Switch to light mode + features: + - navigation.tabs + - navigation.tabs.sticky + - navigation.top font: - text: 'Work Sans' - logo: 'assets/logo.png' -google_analytics: - - 'UA-105170809-2' - - 'auto' + text: Roboto + code: Roboto Mono + logo: "assets/logo.png" +extra: + analytics: + provider: google + property: G-5Z1VTPDL73 markdown_extensions: - codehilite - admonition + - pymdownx.superfences + - pymdownx.details - toc: permalink: true nav: - - Overview: 'index.md' - - Installation: - - 'installation.md' - - 'managed-namespace.md' - - 'validating-admission-webhook.md' - - 'quick_start.md' - - Concepts: - - 'concepts/architecture.md' - - 'concepts/event_source.md' - - 'concepts/sensor.md' - - 'concepts/eventbus.md' - - 'concepts/trigger.md' - - Tutorials: - - 'tutorials/01-introduction.md' - - 'tutorials/02-parameterization.md' - - 'tutorials/03-trigger-sources.md' - - 'tutorials/04-standard-k8s-resources.md' - - 'tutorials/05-trigger-custom-resources.md' - - 'tutorials/06-trigger-conditions.md' - - 'tutorials/07-filters.md' - - 'tutorials/08-policy.md' - - EventSources: - - Setup: - - 'eventsources/setup/amqp.md' - - 'eventsources/setup/aws-sns.md' - - 'eventsources/setup/aws-sqs.md' - - 'eventsources/setup/calendar.md' - - 'eventsources/setup/emitter.md' - - 'eventsources/setup/file.md' - - 'eventsources/setup/gcp-pub-sub.md' - - 'eventsources/setup/github.md' - - 'eventsources/setup/gitlab.md' - - 'eventsources/setup/kafka.md' - - 'eventsources/setup/minio.md' - - 'eventsources/setup/mqtt.md' - - 'eventsources/setup/nats.md' - - 'eventsources/setup/nsq.md' - - 'eventsources/setup/redis.md' - - 'eventsources/setup/resource.md' - - 'eventsources/setup/webhook.md' - - 'eventsources/setup/pulsar.md' - - 'eventsources/multiple-events.md' - - 'eventsources/naming.md' - - 'eventsources/services.md' - - 'eventsources/ha.md' - - 'eventsources/webhook-authentication.md' - - 'eventsources/webhook-health-check.md' - - 'eventsources/calendar-catch-up.md' - - 'eventsources/gcp-pubsub.md' - - 'eventsources/generic.md' - - Sensors: - - Triggers: - - 'sensors/triggers/argo-workflow.md' - - 'sensors/triggers/aws-lambda.md' - - 'sensors/triggers/http-trigger.md' - - 'sensors/triggers/nats-trigger.md' - - 'sensors/triggers/kafka-trigger.md' - - 'sensors/triggers/k8s-object-trigger.md' - - 'sensors/triggers/log.md' - - 'sensors/triggers/openwhisk-trigger.md' - - 'sensors/triggers/slack-trigger.md' - - 'sensors/triggers/azure-event-hubs.md' - - 'sensors/triggers/build-your-own-trigger.md' - - 'sensors/trigger-conditions.md' - - 'sensors/ha.md' - - More Information: 'sensors/more-about-sensors-and-triggers.md' - - 'eventbus.md' - - 'service-accounts.md' - - Operator Guide: - - 'metrics.md' - - HA/DR Recommendations: 'dr_ha_recommendations.md' - - 'developer_guide.md' - - 'FAQ.md' - - Releases ⧉: 'releases.md' - - Roadmap ⧉: https://github.com/argoproj/argo-events/milestones - - Blog ⧉: https://blog.argoproj.io/ + - Home: "index.md" + - Getting Started: + - "quick_start.md" + - 
Tutorials: + - "tutorials/01-introduction.md" + - "tutorials/02-parameterization.md" + - "tutorials/03-trigger-sources.md" + - "tutorials/04-standard-k8s-resources.md" + - "tutorials/05-trigger-custom-resources.md" + - "tutorials/06-trigger-conditions.md" + - "tutorials/07-policy.md" + - User Guide: + - Concepts: + - "concepts/architecture.md" + - "concepts/event_source.md" + - "concepts/sensor.md" + - "concepts/eventbus.md" + - "concepts/trigger.md" + - EventBus: + - "eventbus/eventbus.md" + - "eventbus/stan.md" + - "eventbus/jetstream.md" + - "eventbus/kafka.md" + - "eventbus/antiaffinity.md" + - EventSources: + - Setup: + - "eventsources/setup/amqp.md" + - "eventsources/setup/aws-sns.md" + - "eventsources/setup/aws-sqs.md" + - "eventsources/setup/azure-service-bus.md" + - "eventsources/setup/azure-queue-storage.md" + - "eventsources/setup/calendar.md" + - "eventsources/setup/emitter.md" + - "eventsources/setup/file.md" + - "eventsources/setup/gcp-pub-sub.md" + - "eventsources/setup/github.md" + - "eventsources/setup/gitlab.md" + - "eventsources/setup/bitbucket.md" + - "eventsources/setup/bitbucketserver.md" + - "eventsources/setup/kafka.md" + - "eventsources/setup/minio.md" + - "eventsources/setup/mqtt.md" + - "eventsources/setup/nats.md" + - "eventsources/setup/nsq.md" + - "eventsources/setup/redis.md" + - "eventsources/setup/redis-streams.md" + - "eventsources/setup/resource.md" + - "eventsources/setup/webhook.md" + - "eventsources/setup/pulsar.md" + - "eventsources/multiple-events.md" + - "eventsources/naming.md" + - "eventsources/services.md" + - "eventsources/ha.md" + - "eventsources/filtering.md" + - "eventsources/webhook-authentication.md" + - "eventsources/webhook-health-check.md" + - "eventsources/calendar-catch-up.md" + - "eventsources/gcp-pubsub.md" + - "eventsources/generic.md" + - Sensors: + - Triggers: + - "sensors/triggers/argo-workflow.md" + - "sensors/triggers/aws-lambda.md" + - "sensors/triggers/http-trigger.md" + - "sensors/triggers/nats-trigger.md" + - "sensors/triggers/kafka-trigger.md" + - "sensors/triggers/k8s-object-trigger.md" + - "sensors/triggers/log.md" + - "sensors/triggers/openwhisk-trigger.md" + - "sensors/triggers/slack-trigger.md" + - "sensors/triggers/azure-event-hubs.md" + - "sensors/triggers/pulsar-trigger.md" + - "sensors/triggers/build-your-own-trigger.md" + - "sensors/trigger-conditions.md" + - "sensors/transform.md" + - "sensors/ha.md" + - Filters: + - "sensors/filters/intro.md" + - "sensors/filters/expr.md" + - "sensors/filters/data.md" + - "sensors/filters/script.md" + - "sensors/filters/ctx.md" + - "sensors/filters/time.md" + - More Information: "sensors/more-about-sensors-and-triggers.md" + - "service-accounts.md" + - "FAQ.md" + - Operator Manual: + - "installation.md" + - "managed-namespace.md" + - "validating-admission-webhook.md" + - "security.md" + - "metrics.md" + - HA/DR Recommendations: "dr_ha_recommendations.md" + - Developer Guide: + - "developer_guide.md" + - "CONTRIBUTING.md" + - Roadmap: https://github.com/argoproj/argo-events/milestones + - Blog: https://blog.argoproj.io/ + - Releases: "releases.md" diff --git a/pkg/apis/common/common.go b/pkg/apis/common/common.go index 9ba618a40b..53f288a0b6 100644 --- a/pkg/apis/common/common.go +++ b/pkg/apis/common/common.go @@ -25,30 +25,37 @@ type EventSourceType string // possible event source types var ( - MinioEvent EventSourceType = "minio" - CalendarEvent EventSourceType = "calendar" - FileEvent EventSourceType = "file" - ResourceEvent EventSourceType = "resource" - WebhookEvent 
EventSourceType = "webhook" - AMQPEvent EventSourceType = "amqp" - KafkaEvent EventSourceType = "kafka" - MQTTEvent EventSourceType = "mqtt" - NATSEvent EventSourceType = "nats" - SNSEvent EventSourceType = "sns" - SQSEvent EventSourceType = "sqs" - PubSubEvent EventSourceType = "pubsub" - GithubEvent EventSourceType = "github" - GitlabEvent EventSourceType = "gitlab" - HDFSEvent EventSourceType = "hdfs" - SlackEvent EventSourceType = "slack" - StorageGridEvent EventSourceType = "storagegrid" - AzureEventsHub EventSourceType = "azureEventsHub" - StripeEvent EventSourceType = "stripe" - EmitterEvent EventSourceType = "emitter" - RedisEvent EventSourceType = "redis" - NSQEvent EventSourceType = "nsq" - PulsarEvent EventSourceType = "pulsar" - GenericEvent EventSourceType = "generic" + MinioEvent EventSourceType = "minio" + CalendarEvent EventSourceType = "calendar" + FileEvent EventSourceType = "file" + SFTPEvent EventSourceType = "sftp" + ResourceEvent EventSourceType = "resource" + WebhookEvent EventSourceType = "webhook" + AMQPEvent EventSourceType = "amqp" + KafkaEvent EventSourceType = "kafka" + MQTTEvent EventSourceType = "mqtt" + NATSEvent EventSourceType = "nats" + SNSEvent EventSourceType = "sns" + SQSEvent EventSourceType = "sqs" + PubSubEvent EventSourceType = "pubsub" + GerritEvent EventSourceType = "gerrit" + GithubEvent EventSourceType = "github" + GitlabEvent EventSourceType = "gitlab" + HDFSEvent EventSourceType = "hdfs" + SlackEvent EventSourceType = "slack" + StorageGridEvent EventSourceType = "storagegrid" + AzureEventsHub EventSourceType = "azureEventsHub" + AzureQueueStorage EventSourceType = "azureQueueStorage" + AzureServiceBus EventSourceType = "azureServiceBus" + StripeEvent EventSourceType = "stripe" + EmitterEvent EventSourceType = "emitter" + RedisEvent EventSourceType = "redis" + RedisStreamEvent EventSourceType = "redisStream" + NSQEvent EventSourceType = "nsq" + PulsarEvent EventSourceType = "pulsar" + GenericEvent EventSourceType = "generic" + BitbucketServerEvent EventSourceType = "bitbucketserver" + BitbucketEvent EventSourceType = "bitbucket" ) var ( @@ -60,6 +67,7 @@ var ( KafkaEvent, PubSubEvent, AzureEventsHub, + AzureServiceBus, NATSEvent, MQTTEvent, MinioEvent, @@ -67,9 +75,11 @@ var ( NSQEvent, PulsarEvent, RedisEvent, + RedisStreamEvent, ResourceEvent, HDFSEvent, FileEvent, + SFTPEvent, GenericEvent, } ) @@ -79,17 +89,20 @@ type TriggerType string // possible trigger types var ( - OpenWhiskTrigger TriggerType = "OpenWhisk" - ArgoWorkflowTrigger TriggerType = "ArgoWorkflow" - LambdaTrigger TriggerType = "Lambda" - CustomTrigger TriggerType = "Custom" - HTTPTrigger TriggerType = "HTTP" - KafkaTrigger TriggerType = "Kafka" - LogTrigger TriggerType = "Log" - NATSTrigger TriggerType = "NATS" - SlackTrigger TriggerType = "Slack" - K8sTrigger TriggerType = "Kubernetes" - AzureEventHubsTrigger TriggerType = "AzureEventHubs" + OpenWhiskTrigger TriggerType = "OpenWhisk" + ArgoWorkflowTrigger TriggerType = "ArgoWorkflow" + LambdaTrigger TriggerType = "Lambda" + CustomTrigger TriggerType = "Custom" + HTTPTrigger TriggerType = "HTTP" + KafkaTrigger TriggerType = "Kafka" + PulsarTrigger TriggerType = "Pulsar" + LogTrigger TriggerType = "Log" + NATSTrigger TriggerType = "NATS" + SlackTrigger TriggerType = "Slack" + K8sTrigger TriggerType = "Kubernetes" + AzureEventHubsTrigger TriggerType = "AzureEventHubs" + AzureServiceBusTrigger TriggerType = "AzureServiceBus" + EmailTrigger TriggerType = "Email" ) // EventBusType is the type of event bus @@ -97,7 +110,9 @@ type 
EventBusType string // possible event bus types var ( - EventBusNATS EventBusType = "nats" + EventBusNATS EventBusType = "nats" + EventBusJetStream EventBusType = "jetstream" + EventBusKafka EventBusType = "kafka" ) // BasicAuth contains the reference to K8s secrets that holds the username and password @@ -129,16 +144,9 @@ type TLSConfig struct { ClientCertSecret *corev1.SecretKeySelector `json:"clientCertSecret,omitempty" protobuf:"bytes,2,opt,name=clientCertSecret"` // ClientKeySecret refers to the secret that contains the client key ClientKeySecret *corev1.SecretKeySelector `json:"clientKeySecret,omitempty" protobuf:"bytes,3,opt,name=clientKeySecret"` - - // DeprecatedCACertPath refers the file path that contains the CA cert. - // Deprecated: will be removed in v1.5, use CACertSecret instead - DeprecatedCACertPath string `json:"caCertPath,omitempty" protobuf:"bytes,4,opt,name=caCertPath"` - // DeprecatedClientCertPath refers the file path that contains client cert. - // Deprecated: will be removed in v1.5, use ClientCertSecret instead - DeprecatedClientCertPath string `json:"clientCertPath,omitempty" protobuf:"bytes,5,opt,name=clientCertPath"` - // DeprecatedClientKeyPath refers the file path that contains client key. - // Deprecated: will be removed in v1.5, use ClientKeySecret instead - DeprecatedClientKeyPath string `json:"clientKeyPath,omitempty" protobuf:"bytes,6,opt,name=clientKeyPath"` + // If true, skips creation of TLSConfig with certs and creates an empty TLSConfig. (Defaults to false) + // +optional + InsecureSkipVerify bool `json:"insecureSkipVerify,omitempty" protobuf:"varint,4,opt,name=insecureSkipVerify"` } // SASLConfig refers to SASL configuration for a client @@ -149,16 +157,27 @@ type SASLConfig struct { Mechanism string `json:"mechanism,omitempty" protobuf:"bytes,1,opt,name=mechanism"` // User is the authentication identity (authcid) to present for // SASL/PLAIN or SASL/SCRAM authentication - UserSecret *corev1.SecretKeySelector `json:"userSecret,omitempty" protobuf:"bytes,2,opt,name=user"` + UserSecret *corev1.SecretKeySelector `json:"userSecret,omitempty" protobuf:"bytes,2,opt,name=userSecret"` // Password for SASL/PLAIN authentication - PasswordSecret *corev1.SecretKeySelector `json:"passwordSecret,omitempty" protobuf:"bytes,3,opt,name=password"` + PasswordSecret *corev1.SecretKeySelector `json:"passwordSecret,omitempty" protobuf:"bytes,3,opt,name=passwordSecret"` +} + +// SchemaRegistryConfig refers to configuration for a client +type SchemaRegistryConfig struct { + // Schema Registry URL. 
+ URL string `json:"url" protobuf:"bytes,1,opt,name=url"` + // Schema ID + SchemaID int32 `json:"schemaId" protobuf:"varint,2,name=schemaId"` + // +optional + // SchemaRegistry - basic authentication + Auth BasicAuth `json:"auth,omitempty" protobuf:"bytes,3,opt,name=auth"` } // Backoff for an operation type Backoff struct { // The initial duration in nanoseconds or strings like "1s", "3m" // +optional - Duration *Int64OrString `json:"duration,omitempty" protobuf:"varint,1,opt,name=duration"` + Duration *Int64OrString `json:"duration,omitempty" protobuf:"bytes,1,opt,name=duration"` // Duration is multiplied by factor each iteration // +optional Factor *Amount `json:"factor,omitempty" protobuf:"bytes,2,opt,name=factor"` diff --git a/pkg/apis/common/deepcopy_generated.go b/pkg/apis/common/deepcopy_generated.go index db191c1dfb..e243a97cb3 100644 --- a/pkg/apis/common/deepcopy_generated.go +++ b/pkg/apis/common/deepcopy_generated.go @@ -1,7 +1,8 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -221,6 +222,11 @@ func (in *S3Artifact) DeepCopyInto(out *S3Artifact) { (*out)[key] = val } } + if in.CACertificate != nil { + in, out := &in.CACertificate, &out.CACertificate + *out = new(v1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } return } @@ -292,6 +298,23 @@ func (in *SASLConfig) DeepCopy() *SASLConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaRegistryConfig) DeepCopyInto(out *SchemaRegistryConfig) { + *out = *in + in.Auth.DeepCopyInto(&out.Auth) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaRegistryConfig. +func (in *SchemaRegistryConfig) DeepCopy() *SchemaRegistryConfig { + if in == nil { + return nil + } + out := new(SchemaRegistryConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SecureHeader) DeepCopyInto(out *SecureHeader) { *out = *in diff --git a/pkg/apis/common/generated.pb.go b/pkg/apis/common/generated.pb.go index 9f59f40ec7..817e9cb863 100644 --- a/pkg/apis/common/generated.pb.go +++ b/pkg/apis/common/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -352,10 +352,38 @@ func (m *SASLConfig) XXX_DiscardUnknown() { var xxx_messageInfo_SASLConfig proto.InternalMessageInfo +func (m *SchemaRegistryConfig) Reset() { *m = SchemaRegistryConfig{} } +func (*SchemaRegistryConfig) ProtoMessage() {} +func (*SchemaRegistryConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_02aae6165a434fa7, []int{11} +} +func (m *SchemaRegistryConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SchemaRegistryConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SchemaRegistryConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_SchemaRegistryConfig.Merge(m, src) +} +func (m *SchemaRegistryConfig) XXX_Size() int { + return m.Size() +} +func (m *SchemaRegistryConfig) XXX_DiscardUnknown() { + xxx_messageInfo_SchemaRegistryConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_SchemaRegistryConfig proto.InternalMessageInfo + func (m *SecureHeader) Reset() { *m = SecureHeader{} } func (*SecureHeader) ProtoMessage() {} func (*SecureHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_02aae6165a434fa7, []int{11} + return fileDescriptor_02aae6165a434fa7, []int{12} } func (m *SecureHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -383,7 +411,7 @@ var xxx_messageInfo_SecureHeader proto.InternalMessageInfo func (m *Status) Reset() { *m = Status{} } func (*Status) ProtoMessage() {} func (*Status) Descriptor() ([]byte, []int) { - return fileDescriptor_02aae6165a434fa7, []int{12} + return fileDescriptor_02aae6165a434fa7, []int{13} } func (m *Status) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -411,7 +439,7 @@ var xxx_messageInfo_Status proto.InternalMessageInfo func (m *TLSConfig) Reset() { *m = TLSConfig{} } func (*TLSConfig) ProtoMessage() {} func (*TLSConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_02aae6165a434fa7, []int{13} + return fileDescriptor_02aae6165a434fa7, []int{14} } func (m *TLSConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -439,7 +467,7 @@ var xxx_messageInfo_TLSConfig proto.InternalMessageInfo func (m *ValueFromSource) Reset() { *m = ValueFromSource{} } func (*ValueFromSource) ProtoMessage() {} func (*ValueFromSource) Descriptor() ([]byte, []int) { - return fileDescriptor_02aae6165a434fa7, []int{14} + return fileDescriptor_02aae6165a434fa7, []int{15} } func (m *ValueFromSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -479,6 +507,7 @@ func init() { proto.RegisterType((*S3Bucket)(nil), "github.com.argoproj.argo_events.pkg.apis.common.S3Bucket") proto.RegisterType((*S3Filter)(nil), "github.com.argoproj.argo_events.pkg.apis.common.S3Filter") proto.RegisterType((*SASLConfig)(nil), "github.com.argoproj.argo_events.pkg.apis.common.SASLConfig") + proto.RegisterType((*SchemaRegistryConfig)(nil), "github.com.argoproj.argo_events.pkg.apis.common.SchemaRegistryConfig") proto.RegisterType((*SecureHeader)(nil), "github.com.argoproj.argo_events.pkg.apis.common.SecureHeader") proto.RegisterType((*Status)(nil), "github.com.argoproj.argo_events.pkg.apis.common.Status") proto.RegisterType((*TLSConfig)(nil), "github.com.argoproj.argo_events.pkg.apis.common.TLSConfig") @@ -490,94 +519,97 @@ func init() { } var fileDescriptor_02aae6165a434fa7 = []byte{ - // 1383 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x97, 0x5b, 0x6f, 0x1b, 0x45, - 0x14, 0x80, 0xb3, 
0x71, 0xe2, 0x78, 0x4f, 0xae, 0x4c, 0x23, 0x61, 0x59, 0xd4, 0x1b, 0x2d, 0x02, - 0xa5, 0xd0, 0xae, 0xd5, 0x8b, 0xa0, 0x2d, 0xa8, 0xe0, 0x75, 0x53, 0x35, 0x6d, 0x42, 0xab, 0xd9, - 0xb6, 0x0f, 0xad, 0x10, 0x9a, 0xac, 0xc7, 0xce, 0x36, 0xde, 0x8b, 0x76, 0xc7, 0x69, 0xfd, 0x06, - 0x2f, 0xbc, 0xc2, 0x3f, 0xe0, 0x17, 0xf0, 0x3f, 0xfa, 0x58, 0xf1, 0xd2, 0x3e, 0x19, 0xba, 0xfc, - 0x09, 0xd4, 0x27, 0x34, 0x97, 0x5d, 0xaf, 0xdd, 0x20, 0xd8, 0x88, 0xb7, 0xf5, 0x99, 0x73, 0xbe, - 0x33, 0x73, 0x2e, 0x73, 0xc6, 0xf0, 0x55, 0xdf, 0x63, 0x87, 0xc3, 0x03, 0xcb, 0x0d, 0xfd, 0x16, - 0x89, 0xfb, 0x61, 0x14, 0x87, 0x4f, 0xc5, 0xc7, 0x05, 0x7a, 0x4c, 0x03, 0x96, 0xb4, 0xa2, 0xa3, - 0x7e, 0x8b, 0x44, 0x5e, 0xd2, 0x72, 0x43, 0xdf, 0x0f, 0x83, 0x56, 0x9f, 0x06, 0x34, 0x26, 0x8c, - 0x76, 0xad, 0x28, 0x0e, 0x59, 0x88, 0x5a, 0x13, 0x80, 0x95, 0x01, 0xc4, 0xc7, 0x77, 0x12, 0x60, - 0x45, 0x47, 0x7d, 0x8b, 0x03, 0x2c, 0x09, 0x68, 0x5c, 0x28, 0x78, 0xec, 0x87, 0xfd, 0xb0, 0x25, - 0x38, 0x07, 0xc3, 0x9e, 0xf8, 0x25, 0x7e, 0x88, 0x2f, 0xc9, 0x6f, 0x98, 0x47, 0x57, 0x13, 0xcb, - 0x0b, 0xf9, 0x1e, 0x5a, 0x6e, 0x18, 0xd3, 0xd6, 0xf1, 0xc5, 0xd9, 0x3d, 0x34, 0xae, 0x4c, 0x74, - 0x7c, 0xe2, 0x1e, 0x7a, 0x01, 0x8d, 0x47, 0x93, 0x8d, 0xfb, 0x94, 0x91, 0x13, 0xac, 0xcc, 0x73, - 0x50, 0x6d, 0xfb, 0xe1, 0x30, 0x60, 0xc8, 0x80, 0xc5, 0x63, 0x32, 0x18, 0xd2, 0xba, 0xb6, 0xa5, - 0x6d, 0xaf, 0xd8, 0x7a, 0x3a, 0x36, 0x16, 0x1f, 0x71, 0x01, 0x96, 0x72, 0xf3, 0xb7, 0x79, 0x58, - 0xb2, 0x89, 0x7b, 0x14, 0xf6, 0x7a, 0xe8, 0x10, 0x6a, 0xdd, 0x61, 0x4c, 0x98, 0x17, 0x06, 0x42, - 0x7f, 0xf9, 0xd2, 0x0d, 0xab, 0x64, 0x0c, 0xac, 0xdd, 0x80, 0x7d, 0x76, 0xe5, 0x5e, 0xec, 0xb0, - 0xd8, 0x0b, 0xfa, 0xf6, 0x4a, 0x3a, 0x36, 0x6a, 0x37, 0x15, 0x13, 0xe7, 0x74, 0xf4, 0x04, 0xaa, - 0x3d, 0xe2, 0xb2, 0x30, 0xae, 0xcf, 0x0b, 0x3f, 0x9f, 0x97, 0xf6, 0x23, 0xcf, 0x67, 0x43, 0x3a, - 0x36, 0xaa, 0xb7, 0x04, 0x0a, 0x2b, 0x24, 0x87, 0x3f, 0xf5, 0x18, 0xa3, 0x71, 0xbd, 0xf2, 0x3f, - 0xc0, 0xef, 0x08, 0x14, 0x56, 0x48, 0xf4, 0x21, 0x2c, 0x26, 0x8c, 0x46, 0x49, 0x7d, 0x61, 0x4b, - 0xdb, 0x5e, 0xb4, 0x57, 0x5f, 0x8c, 0x8d, 0x39, 0x1e, 0x54, 0x87, 0x0b, 0xb1, 0x5c, 0x33, 0x7f, - 0xd5, 0x40, 0xb7, 0x49, 0xe2, 0xb9, 0xed, 0x21, 0x3b, 0x44, 0xf7, 0xa0, 0x36, 0x4c, 0x68, 0x1c, - 0x10, 0x9f, 0xaa, 0xb0, 0x7e, 0x64, 0xc9, 0xb4, 0x72, 0xa7, 0x16, 0x4f, 0xbd, 0x75, 0x7c, 0xd1, - 0x72, 0xa8, 0x1b, 0x53, 0x76, 0x97, 0x8e, 0x1c, 0x3a, 0xa0, 0xfc, 0x20, 0x32, 0x7a, 0x0f, 0x95, - 0x29, 0xce, 0x21, 0x1c, 0x18, 0x91, 0x24, 0x79, 0x16, 0xc6, 0x5d, 0x15, 0xbf, 0x32, 0xc0, 0xfb, - 0xca, 0x14, 0xe7, 0x10, 0xf3, 0xd5, 0x3c, 0xe8, 0x9d, 0x30, 0xe8, 0x7a, 0x22, 0x39, 0x17, 0x61, - 0x81, 0x8d, 0x22, 0xb9, 0x57, 0xdd, 0x3e, 0xab, 0x4e, 0xb8, 0xf0, 0x60, 0x14, 0xd1, 0xb7, 0x63, - 0x63, 0x35, 0x57, 0xe4, 0x02, 0x2c, 0x54, 0xd1, 0x1e, 0x54, 0x13, 0x46, 0xd8, 0x30, 0x11, 0xfb, - 0xd1, 0xed, 0x2b, 0xca, 0xa8, 0xea, 0x08, 0xe9, 0xdb, 0xb1, 0x71, 0x42, 0xb1, 0x5b, 0x39, 0x49, - 0x6a, 0x61, 0xc5, 0x40, 0xc7, 0x80, 0x06, 0x24, 0x61, 0x0f, 0x62, 0x12, 0x24, 0xd2, 0x93, 0xe7, - 0x53, 0x95, 0xcc, 0x4f, 0x0a, 0x27, 0xcd, 0x3b, 0x62, 0x92, 0x40, 0xde, 0x11, 0xfc, 0xec, 0xdc, - 0xc2, 0x6e, 0xa8, 0x5d, 0xa0, 0xbd, 0x77, 0x68, 0xf8, 0x04, 0x0f, 0xe8, 0x63, 0xa8, 0xc6, 0x94, - 0x24, 0x61, 0x20, 0x92, 0xab, 0xdb, 0x6b, 0xd9, 0x29, 0xb0, 0x90, 0x62, 0xb5, 0x8a, 0xce, 0xc1, - 0x92, 0x4f, 0x93, 0x84, 0xf4, 0x69, 0x7d, 0x51, 0x28, 0xae, 0x2b, 0xc5, 0xa5, 0x7d, 0x29, 0xc6, - 0xd9, 0xba, 0xf9, 0x93, 0x06, 0xab, 0x53, 0x2d, 0x81, 0xb6, 0x0b, 0xd1, 0xad, 0xd8, 0x9b, 0x33, - 0xd1, 0x5d, 0x28, 0x04, 0xf5, 0x3c, 0xd4, 
0x3c, 0x6e, 0xfa, 0x88, 0x0c, 0x44, 0x58, 0x2b, 0xf6, - 0x86, 0xd2, 0xae, 0xed, 0x2a, 0x39, 0xce, 0x35, 0xf8, 0xe6, 0x13, 0x16, 0x73, 0xdd, 0xca, 0xf4, - 0xe6, 0x1d, 0x21, 0xc5, 0x6a, 0xd5, 0xfc, 0x6b, 0x1e, 0x6a, 0xfb, 0x94, 0x91, 0x2e, 0x61, 0x04, - 0xfd, 0xa0, 0xc1, 0x32, 0x09, 0x82, 0x90, 0x89, 0xb6, 0x4c, 0xea, 0xda, 0x56, 0x65, 0x7b, 0xf9, - 0xd2, 0x9d, 0xd2, 0x0d, 0x93, 0x01, 0xad, 0xf6, 0x04, 0xb6, 0x13, 0xb0, 0x78, 0x64, 0x9f, 0x51, - 0xdb, 0x58, 0x2e, 0xac, 0xe0, 0xa2, 0x4f, 0xe4, 0x43, 0x75, 0x40, 0x0e, 0xe8, 0x80, 0xd7, 0x0e, - 0xf7, 0xbe, 0x73, 0x7a, 0xef, 0x7b, 0x82, 0x23, 0x1d, 0xe7, 0xe7, 0x97, 0x42, 0xac, 0x9c, 0x34, - 0x6e, 0xc0, 0xc6, 0xec, 0x26, 0xd1, 0x06, 0x54, 0x8e, 0xe8, 0x48, 0x16, 0x3c, 0xe6, 0x9f, 0x68, - 0x33, 0xbb, 0x37, 0x45, 0x3d, 0xab, 0xcb, 0xf2, 0xfa, 0xfc, 0x55, 0xad, 0x71, 0x0d, 0x96, 0x0b, - 0x6e, 0xca, 0x98, 0x9a, 0x9f, 0x42, 0x0d, 0xd3, 0x24, 0x1c, 0xc6, 0x2e, 0xfd, 0xf7, 0x8b, 0xf9, - 0xe5, 0x22, 0x80, 0x73, 0xb9, 0x1d, 0x33, 0x8f, 0x5f, 0x6b, 0xbc, 0x18, 0x68, 0xd0, 0x8d, 0x42, - 0x2f, 0x60, 0xaa, 0x31, 0xf3, 0x62, 0xd8, 0x51, 0x72, 0x9c, 0x6b, 0xa0, 0x6f, 0xa1, 0x7a, 0x30, - 0x74, 0x8f, 0x28, 0x53, 0xf7, 0xc3, 0xb5, 0xd2, 0x31, 0x75, 0x2e, 0xdb, 0x02, 0x20, 0x2f, 0x41, - 0xf9, 0x8d, 0x15, 0x54, 0x36, 0x4a, 0x9f, 0x8f, 0x89, 0xca, 0x6c, 0xa3, 0x70, 0x29, 0x56, 0xab, - 0xb2, 0x82, 0x13, 0xea, 0x0e, 0x63, 0x2a, 0x5a, 0xaa, 0x56, 0xac, 0x60, 0x29, 0xc7, 0xb9, 0x06, - 0xc2, 0xa0, 0x13, 0xd7, 0xa5, 0x49, 0x72, 0x97, 0x8e, 0x44, 0x63, 0xfd, 0xe7, 0x7b, 0x6d, 0x35, - 0x1d, 0x1b, 0x7a, 0x3b, 0xb3, 0xc5, 0x13, 0x0c, 0x67, 0x26, 0x99, 0x7a, 0xbd, 0x5a, 0x9a, 0x99, - 0x8b, 0xf1, 0x04, 0x83, 0x4c, 0xa8, 0xca, 0xa0, 0xd5, 0x97, 0xb6, 0x2a, 0xdb, 0xba, 0x8c, 0xd0, - 0x8e, 0x90, 0x60, 0xb5, 0xc2, 0x13, 0xd0, 0xf3, 0x06, 0x7c, 0x06, 0xd5, 0x4e, 0x9d, 0x80, 0x5b, - 0x02, 0xa0, 0x46, 0x9c, 0xf8, 0xc6, 0x0a, 0x8a, 0x9e, 0x41, 0xcd, 0x57, 0x45, 0x5f, 0xd7, 0x45, - 0xd7, 0xec, 0x9e, 0xc2, 0x41, 0x56, 0x5c, 0x79, 0x03, 0xc9, 0xce, 0xc9, 0x73, 0x94, 0x89, 0x71, - 0xee, 0xac, 0xf1, 0x05, 0xac, 0x4e, 0x29, 0x97, 0xaa, 0xff, 0xbb, 0x50, 0xcb, 0xca, 0x0a, 0x9d, - 0x2d, 0xd8, 0xd9, 0xcb, 0xca, 0x63, 0x85, 0x47, 0x5a, 0x40, 0xb6, 0x60, 0x41, 0xcc, 0x4b, 0x39, - 0x4e, 0x56, 0xb2, 0x5b, 0xf2, 0x1b, 0x3e, 0x08, 0xc5, 0x8a, 0xf9, 0x98, 0xc3, 0x64, 0x58, 0x78, - 0x3d, 0x46, 0x31, 0xed, 0x79, 0xcf, 0x15, 0x2f, 0xaf, 0xc7, 0xfb, 0x42, 0x8a, 0xd5, 0xaa, 0xb8, - 0x23, 0x87, 0x3d, 0xae, 0x37, 0x3f, 0x73, 0x47, 0x0a, 0x29, 0x56, 0xab, 0xe6, 0xef, 0x1a, 0x80, - 0xd3, 0x76, 0xf6, 0x3a, 0x61, 0xd0, 0xf3, 0xfa, 0xa8, 0x05, 0xba, 0x4f, 0xdd, 0x43, 0x12, 0x78, - 0x89, 0xaf, 0x3c, 0xbc, 0xa7, 0x2c, 0xf5, 0xfd, 0x6c, 0x01, 0x4f, 0x74, 0xd0, 0x2e, 0x2c, 0xf0, - 0x61, 0x5d, 0x6e, 0x38, 0xaf, 0xa5, 0x63, 0x03, 0xf8, 0xb4, 0x97, 0x4b, 0x58, 0x20, 0xd0, 0xc3, - 0xc2, 0xac, 0xaf, 0x94, 0xc1, 0xa1, 0x74, 0x6c, 0xac, 0x65, 0xb3, 0x5e, 0x21, 0x27, 0x13, 0xff, - 0x17, 0x0d, 0x56, 0x1c, 0xd1, 0x76, 0xb7, 0x29, 0xe9, 0xd2, 0x38, 0x0f, 0xb8, 0xf6, 0x4f, 0x01, - 0x47, 0x3e, 0xe8, 0x22, 0x95, 0xb7, 0xe2, 0xd0, 0x57, 0x27, 0xfb, 0xba, 0x74, 0xd1, 0x3d, 0xca, - 0x08, 0x8e, 0xb8, 0x06, 0x65, 0x97, 0xe5, 0x42, 0x3c, 0xf1, 0x60, 0x3e, 0x07, 0xf5, 0x78, 0x40, - 0x01, 0x80, 0x9b, 0xbd, 0x14, 0xb2, 0x11, 0x75, 0xbd, 0xb4, 0xe7, 0xfc, 0xb1, 0x61, 0x23, 0x75, - 0x38, 0xc8, 0x45, 0x09, 0x2e, 0x78, 0x30, 0x7f, 0x5c, 0x00, 0xfd, 0xc1, 0x9e, 0xa3, 0x92, 0xff, - 0x04, 0x56, 0x5c, 0xd2, 0xa1, 0x31, 0x93, 0x31, 0x2c, 0xf7, 0x82, 0xdb, 0x48, 0xc7, 0xc6, 0x4a, - 0xa7, 0x3d, 0x31, 0xc7, 0x53, 0x30, 0xd4, 0x87, 0x0d, 0x77, 0xe0, 
0xd1, 0x80, 0x15, 0x1c, 0x94, - 0x2a, 0x9a, 0xcd, 0x74, 0x6c, 0x6c, 0x74, 0x66, 0x10, 0xf8, 0x1d, 0x28, 0xea, 0xc2, 0xba, 0x94, - 0x09, 0x63, 0xe1, 0xa7, 0x54, 0x35, 0x9d, 0x49, 0xc7, 0xc6, 0x7a, 0x67, 0x9a, 0x80, 0x67, 0x91, - 0xe8, 0x4b, 0x00, 0x79, 0xbc, 0xfb, 0x84, 0x1d, 0xaa, 0x47, 0xd4, 0x07, 0x2a, 0xda, 0x9b, 0x37, - 0x69, 0x14, 0x53, 0x97, 0xff, 0x4b, 0x91, 0x01, 0xe1, 0x3a, 0xb8, 0xa0, 0x8f, 0x6e, 0xc3, 0xda, - 0x64, 0xdf, 0x82, 0x20, 0x5f, 0x57, 0x5b, 0x8a, 0x50, 0x2f, 0x10, 0xa6, 0xf4, 0xf0, 0x8c, 0x1d, - 0xda, 0x81, 0xd5, 0x7c, 0x6b, 0x02, 0x54, 0x15, 0x20, 0x43, 0x81, 0xde, 0x9f, 0x05, 0x29, 0x35, - 0x3c, 0x6d, 0x65, 0xbe, 0xd2, 0x60, 0x7d, 0xa6, 0x60, 0x79, 0x39, 0xe4, 0x93, 0x00, 0xd3, 0xde, - 0x29, 0xca, 0xc1, 0x29, 0x98, 0xe3, 0x29, 0x18, 0xea, 0xc3, 0xba, 0x2b, 0xaa, 0x6e, 0x9f, 0x44, - 0x8a, 0x2f, 0xab, 0x61, 0xfb, 0x24, 0x7e, 0xa7, 0xa0, 0x3a, 0x93, 0xa8, 0x69, 0x08, 0x9e, 0xa5, - 0xda, 0xe7, 0x5f, 0xbc, 0x69, 0xce, 0xbd, 0x7c, 0xd3, 0x9c, 0x7b, 0xfd, 0xa6, 0x39, 0xf7, 0x7d, - 0xda, 0xd4, 0x5e, 0xa4, 0x4d, 0xed, 0x65, 0xda, 0xd4, 0x5e, 0xa7, 0x4d, 0xed, 0x8f, 0xb4, 0xa9, - 0xfd, 0xfc, 0x67, 0x73, 0xee, 0x71, 0x55, 0xb6, 0xce, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa8, - 0x77, 0x0b, 0xa9, 0x4a, 0x0f, 0x00, 0x00, + // 1435 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x97, 0xcd, 0x6e, 0xdb, 0x46, + 0x10, 0x80, 0x4d, 0xcb, 0x96, 0xc5, 0xf1, 0x6f, 0x36, 0x3e, 0x08, 0x02, 0x22, 0x19, 0x2c, 0x5a, + 0x38, 0x6d, 0x43, 0x21, 0x3f, 0x68, 0x93, 0x14, 0x48, 0x2b, 0x3a, 0x0e, 0xea, 0xc4, 0x6e, 0x82, + 0x65, 0xec, 0x43, 0xd2, 0x1f, 0xac, 0xa9, 0x95, 0xc4, 0x48, 0x24, 0x05, 0x72, 0xe9, 0x44, 0xb7, + 0x16, 0x7d, 0x80, 0xf6, 0x0d, 0xfa, 0x04, 0x05, 0xfa, 0x0a, 0xbd, 0xe5, 0x18, 0xf4, 0x92, 0x9c, + 0x84, 0x86, 0x7d, 0x88, 0x16, 0x39, 0x15, 0xfb, 0x43, 0x8a, 0x52, 0x5c, 0xb4, 0x34, 0x7a, 0xa3, + 0x66, 0x67, 0xbe, 0xd9, 0x9d, 0xbf, 0x5d, 0xc1, 0xa7, 0x5d, 0x97, 0xf5, 0xe2, 0x63, 0xd3, 0x09, + 0xbc, 0x26, 0x09, 0xbb, 0xc1, 0x30, 0x0c, 0x9e, 0x88, 0x8f, 0x4b, 0xf4, 0x84, 0xfa, 0x2c, 0x6a, + 0x0e, 0xfb, 0xdd, 0x26, 0x19, 0xba, 0x51, 0xd3, 0x09, 0x3c, 0x2f, 0xf0, 0x9b, 0x5d, 0xea, 0xd3, + 0x90, 0x30, 0xda, 0x36, 0x87, 0x61, 0xc0, 0x02, 0xd4, 0x9c, 0x00, 0xcc, 0x14, 0x20, 0x3e, 0xbe, + 0x91, 0x00, 0x73, 0xd8, 0xef, 0x9a, 0x1c, 0x60, 0x4a, 0x40, 0xed, 0x52, 0xce, 0x63, 0x37, 0xe8, + 0x06, 0x4d, 0xc1, 0x39, 0x8e, 0x3b, 0xe2, 0x97, 0xf8, 0x21, 0xbe, 0x24, 0xbf, 0x66, 0xf4, 0xaf, + 0x47, 0xa6, 0x1b, 0xf0, 0x3d, 0x34, 0x9d, 0x20, 0xa4, 0xcd, 0x93, 0xcb, 0xb3, 0x7b, 0xa8, 0x5d, + 0x9b, 0xe8, 0x78, 0xc4, 0xe9, 0xb9, 0x3e, 0x0d, 0x47, 0x93, 0x8d, 0x7b, 0x94, 0x91, 0x53, 0xac, + 0x8c, 0x8b, 0x50, 0x6e, 0x79, 0x41, 0xec, 0x33, 0xd4, 0x80, 0xc5, 0x13, 0x32, 0x88, 0x69, 0x55, + 0xdb, 0xd2, 0xb6, 0x57, 0x2c, 0x3d, 0x19, 0x37, 0x16, 0x8f, 0xb8, 0x00, 0x4b, 0xb9, 0xf1, 0xdb, + 0x3c, 0x2c, 0x59, 0xc4, 0xe9, 0x07, 0x9d, 0x0e, 0xea, 0x41, 0xa5, 0x1d, 0x87, 0x84, 0xb9, 0x81, + 0x2f, 0xf4, 0x97, 0xaf, 0xdc, 0x32, 0x0b, 0xc6, 0xc0, 0xdc, 0xf3, 0xd9, 0x47, 0xd7, 0xee, 0x87, + 0x36, 0x0b, 0x5d, 0xbf, 0x6b, 0xad, 0x24, 0xe3, 0x46, 0xe5, 0xb6, 0x62, 0xe2, 0x8c, 0x8e, 0x1e, + 0x43, 0xb9, 0x43, 0x1c, 0x16, 0x84, 0xd5, 0x79, 0xe1, 0xe7, 0xe3, 0xc2, 0x7e, 0xe4, 0xf9, 0x2c, + 0x48, 0xc6, 0x8d, 0xf2, 0x1d, 0x81, 0xc2, 0x0a, 0xc9, 0xe1, 0x4f, 0x5c, 0xc6, 0x68, 0x58, 0x2d, + 0xfd, 0x0f, 0xf0, 0xbb, 0x02, 0x85, 0x15, 0x12, 0xbd, 0x03, 0x8b, 0x11, 0xa3, 0xc3, 0xa8, 0xba, + 0xb0, 0xa5, 0x6d, 0x2f, 0x5a, 0xab, 0xcf, 0xc7, 0x8d, 0x39, 0x1e, 0x54, 0x9b, 0x0b, 0xb1, 0x5c, + 
0x33, 0x7e, 0xd6, 0x40, 0xb7, 0x48, 0xe4, 0x3a, 0xad, 0x98, 0xf5, 0xd0, 0x7d, 0xa8, 0xc4, 0x11, + 0x0d, 0x7d, 0xe2, 0x51, 0x15, 0xd6, 0x77, 0x4d, 0x99, 0x56, 0xee, 0xd4, 0xe4, 0xa9, 0x37, 0x4f, + 0x2e, 0x9b, 0x36, 0x75, 0x42, 0xca, 0xee, 0xd1, 0x91, 0x4d, 0x07, 0x94, 0x1f, 0x44, 0x46, 0xef, + 0x50, 0x99, 0xe2, 0x0c, 0xc2, 0x81, 0x43, 0x12, 0x45, 0x4f, 0x83, 0xb0, 0xad, 0xe2, 0x57, 0x04, + 0xf8, 0x40, 0x99, 0xe2, 0x0c, 0x62, 0xbc, 0x9c, 0x07, 0x7d, 0x27, 0xf0, 0xdb, 0xae, 0x48, 0xce, + 0x65, 0x58, 0x60, 0xa3, 0xa1, 0xdc, 0xab, 0x6e, 0x5d, 0x50, 0x27, 0x5c, 0x78, 0x38, 0x1a, 0xd2, + 0x37, 0xe3, 0xc6, 0x6a, 0xa6, 0xc8, 0x05, 0x58, 0xa8, 0xa2, 0x7d, 0x28, 0x47, 0x8c, 0xb0, 0x38, + 0x12, 0xfb, 0xd1, 0xad, 0x6b, 0xca, 0xa8, 0x6c, 0x0b, 0xe9, 0x9b, 0x71, 0xe3, 0x94, 0x62, 0x37, + 0x33, 0x92, 0xd4, 0xc2, 0x8a, 0x81, 0x4e, 0x00, 0x0d, 0x48, 0xc4, 0x1e, 0x86, 0xc4, 0x8f, 0xa4, + 0x27, 0xd7, 0xa3, 0x2a, 0x99, 0xef, 0xe7, 0x4e, 0x9a, 0x75, 0xc4, 0x24, 0x81, 0xbc, 0x23, 0xf8, + 0xd9, 0xb9, 0x85, 0x55, 0x53, 0xbb, 0x40, 0xfb, 0x6f, 0xd1, 0xf0, 0x29, 0x1e, 0xd0, 0x7b, 0x50, + 0x0e, 0x29, 0x89, 0x02, 0x5f, 0x24, 0x57, 0xb7, 0xd6, 0xd2, 0x53, 0x60, 0x21, 0xc5, 0x6a, 0x15, + 0x5d, 0x84, 0x25, 0x8f, 0x46, 0x11, 0xe9, 0xd2, 0xea, 0xa2, 0x50, 0x5c, 0x57, 0x8a, 0x4b, 0x07, + 0x52, 0x8c, 0xd3, 0x75, 0xe3, 0x07, 0x0d, 0x56, 0xa7, 0x5a, 0x02, 0x6d, 0xe7, 0xa2, 0x5b, 0xb2, + 0x36, 0x67, 0xa2, 0xbb, 0x90, 0x0b, 0xea, 0x87, 0x50, 0x71, 0xb9, 0xe9, 0x11, 0x19, 0x88, 0xb0, + 0x96, 0xac, 0x0d, 0xa5, 0x5d, 0xd9, 0x53, 0x72, 0x9c, 0x69, 0xf0, 0xcd, 0x47, 0x2c, 0xe4, 0xba, + 0xa5, 0xe9, 0xcd, 0xdb, 0x42, 0x8a, 0xd5, 0xaa, 0xf1, 0xd7, 0x3c, 0x54, 0x0e, 0x28, 0x23, 0x6d, + 0xc2, 0x08, 0xfa, 0x4e, 0x83, 0x65, 0xe2, 0xfb, 0x01, 0x13, 0x6d, 0x19, 0x55, 0xb5, 0xad, 0xd2, + 0xf6, 0xf2, 0x95, 0xbb, 0x85, 0x1b, 0x26, 0x05, 0x9a, 0xad, 0x09, 0x6c, 0xd7, 0x67, 0xe1, 0xc8, + 0x3a, 0xaf, 0xb6, 0xb1, 0x9c, 0x5b, 0xc1, 0x79, 0x9f, 0xc8, 0x83, 0xf2, 0x80, 0x1c, 0xd3, 0x01, + 0xaf, 0x1d, 0xee, 0x7d, 0xf7, 0xec, 0xde, 0xf7, 0x05, 0x47, 0x3a, 0xce, 0xce, 0x2f, 0x85, 0x58, + 0x39, 0xa9, 0xdd, 0x82, 0x8d, 0xd9, 0x4d, 0xa2, 0x0d, 0x28, 0xf5, 0xe9, 0x48, 0x16, 0x3c, 0xe6, + 0x9f, 0x68, 0x33, 0x9d, 0x9b, 0xa2, 0x9e, 0xd5, 0xb0, 0xbc, 0x39, 0x7f, 0x5d, 0xab, 0xdd, 0x80, + 0xe5, 0x9c, 0x9b, 0x22, 0xa6, 0xc6, 0x07, 0x50, 0xc1, 0x34, 0x0a, 0xe2, 0xd0, 0xa1, 0xff, 0x3e, + 0x98, 0x7f, 0x29, 0x03, 0xd8, 0x57, 0x5b, 0x21, 0x73, 0xf9, 0x58, 0xe3, 0xc5, 0x40, 0xfd, 0xf6, + 0x30, 0x70, 0x7d, 0xa6, 0x1a, 0x33, 0x2b, 0x86, 0x5d, 0x25, 0xc7, 0x99, 0x06, 0xfa, 0x0a, 0xca, + 0xc7, 0xb1, 0xd3, 0xa7, 0x4c, 0xcd, 0x87, 0x1b, 0x85, 0x63, 0x6a, 0x5f, 0xb5, 0x04, 0x40, 0x0e, + 0x41, 0xf9, 0x8d, 0x15, 0x54, 0x36, 0x4a, 0x97, 0x5f, 0x13, 0xa5, 0xd9, 0x46, 0xe1, 0x52, 0xac, + 0x56, 0x65, 0x05, 0x47, 0xd4, 0x89, 0x43, 0x2a, 0x5a, 0xaa, 0x92, 0xaf, 0x60, 0x29, 0xc7, 0x99, + 0x06, 0xc2, 0xa0, 0x13, 0xc7, 0xa1, 0x51, 0x74, 0x8f, 0x8e, 0x44, 0x63, 0xfd, 0xe7, 0xb9, 0xb6, + 0x9a, 0x8c, 0x1b, 0x7a, 0x2b, 0xb5, 0xc5, 0x13, 0x0c, 0x67, 0x46, 0xa9, 0x7a, 0xb5, 0x5c, 0x98, + 0x99, 0x89, 0xf1, 0x04, 0x83, 0x0c, 0x28, 0xcb, 0xa0, 0x55, 0x97, 0xb6, 0x4a, 0xdb, 0xba, 0x8c, + 0xd0, 0xae, 0x90, 0x60, 0xb5, 0xc2, 0x13, 0xd0, 0x71, 0x07, 0xfc, 0x0e, 0xaa, 0x9c, 0x39, 0x01, + 0x77, 0x04, 0x40, 0x5d, 0x71, 0xe2, 0x1b, 0x2b, 0x28, 0x7a, 0x0a, 0x15, 0x4f, 0x15, 0x7d, 0x55, + 0x17, 0x5d, 0xb3, 0x77, 0x06, 0x07, 0x69, 0x71, 0x65, 0x0d, 0x24, 0x3b, 0x27, 0xcb, 0x51, 0x2a, + 0xc6, 0x99, 0x33, 0xf4, 0x35, 0xac, 0x3a, 0x64, 0x87, 0x72, 0x43, 0xd7, 0x21, 0x8c, 0x56, 0xa1, + 0x48, 0x4c, 0xcf, 0x25, 
0xfc, 0xfe, 0x68, 0xe5, 0xec, 0xf1, 0x34, 0xae, 0xf6, 0x09, 0xac, 0x4e, + 0x6d, 0xa6, 0x50, 0x7f, 0xdd, 0x83, 0x4a, 0x5a, 0xb6, 0xe8, 0x42, 0xce, 0xce, 0x5a, 0x56, 0x27, + 0x2a, 0xf1, 0x4c, 0x0a, 0xc8, 0x16, 0x2c, 0x88, 0xfb, 0x58, 0x5e, 0x57, 0x2b, 0xe9, 0x14, 0xfe, + 0x82, 0x5f, 0xb4, 0x62, 0xc5, 0x78, 0xc4, 0x61, 0x32, 0xec, 0xbc, 0xde, 0x87, 0x21, 0xed, 0xb8, + 0xcf, 0x14, 0x2f, 0xab, 0xf7, 0x07, 0x42, 0x8a, 0xd5, 0xaa, 0x98, 0xc1, 0x71, 0x87, 0xeb, 0xcd, + 0xcf, 0xcc, 0x60, 0x21, 0xc5, 0x6a, 0xd5, 0xf8, 0x53, 0x03, 0xb0, 0x5b, 0xf6, 0xfe, 0x4e, 0xe0, + 0x77, 0xdc, 0x2e, 0x6a, 0x82, 0xee, 0x51, 0xa7, 0x47, 0x7c, 0x37, 0xf2, 0x94, 0x87, 0x73, 0xca, + 0x52, 0x3f, 0x48, 0x17, 0xf0, 0x44, 0x07, 0x1d, 0x02, 0xf0, 0xc7, 0x80, 0x0c, 0x70, 0xb1, 0x27, + 0xc0, 0x5a, 0x32, 0x6e, 0xc0, 0x61, 0x66, 0x8c, 0x73, 0x20, 0x44, 0x60, 0x2d, 0x7d, 0x12, 0x28, + 0x74, 0xa9, 0x08, 0x1a, 0x25, 0xe3, 0xc6, 0xda, 0x83, 0x29, 0x00, 0x9e, 0x01, 0x1a, 0xbf, 0x6a, + 0xb0, 0x69, 0x3b, 0x3d, 0xea, 0x11, 0x3e, 0x2a, 0x22, 0x16, 0x8e, 0x54, 0x0c, 0x2e, 0x40, 0x29, + 0x0e, 0x07, 0xb3, 0xf9, 0x3a, 0xc4, 0xfb, 0x98, 0xcb, 0xf9, 0x24, 0x89, 0x84, 0xd9, 0x9e, 0x7c, + 0xf2, 0x2c, 0x4e, 0xaa, 0x54, 0xe2, 0xf6, 0x6e, 0xe3, 0x4c, 0x03, 0x7d, 0x09, 0x0b, 0x24, 0x66, + 0x3d, 0xb5, 0xfd, 0x9b, 0x85, 0x5b, 0x23, 0x7b, 0xbb, 0x4d, 0x2a, 0x83, 0xff, 0xc2, 0x82, 0x6a, + 0xfc, 0xa4, 0xc1, 0x8a, 0x2d, 0x46, 0xd6, 0xe7, 0x94, 0xb4, 0x69, 0x98, 0x15, 0x93, 0xf6, 0x4f, + 0xc5, 0x84, 0x3c, 0xd0, 0x45, 0x99, 0xde, 0x09, 0x03, 0x4f, 0xe5, 0xeb, 0xb3, 0xc2, 0xbb, 0x3a, + 0x4a, 0x09, 0xb6, 0xb8, 0x42, 0xe4, 0x84, 0xca, 0x84, 0x78, 0xe2, 0xc1, 0x78, 0x06, 0xea, 0xe1, + 0x85, 0x7c, 0x00, 0x27, 0x7d, 0x65, 0xa5, 0xd7, 0x7b, 0xf1, 0x78, 0x64, 0x0f, 0x35, 0x0b, 0xa9, + 0xc3, 0x41, 0x26, 0x8a, 0x70, 0xce, 0x83, 0xf1, 0x7d, 0x09, 0xf4, 0x87, 0xfb, 0xb6, 0x4a, 0xea, + 0x63, 0x58, 0x91, 0xed, 0xad, 0xca, 0xa9, 0xd0, 0xeb, 0x77, 0x23, 0x19, 0x37, 0x56, 0xe4, 0xb0, + 0x50, 0xc5, 0x34, 0x05, 0x43, 0x5d, 0xd8, 0x70, 0x06, 0x2e, 0xf5, 0x59, 0xce, 0x41, 0xa1, 0x56, + 0xd8, 0x4c, 0xc6, 0x8d, 0x8d, 0x9d, 0x19, 0x04, 0x7e, 0x0b, 0x8a, 0xda, 0xb0, 0x2e, 0x65, 0xc2, + 0xb8, 0x78, 0x5f, 0x9c, 0x4f, 0xc6, 0x8d, 0xf5, 0x9d, 0x69, 0x02, 0x9e, 0x45, 0xa2, 0xbb, 0x80, + 0xd2, 0x9b, 0xd0, 0xee, 0xbb, 0xc3, 0x23, 0x1a, 0xba, 0x9d, 0x91, 0xba, 0x35, 0xb3, 0x87, 0xec, + 0xde, 0x5b, 0x1a, 0xf8, 0x14, 0x2b, 0xe3, 0xa5, 0x06, 0xeb, 0x33, 0xd5, 0xc2, 0x73, 0x91, 0x5d, + 0x61, 0x98, 0x76, 0xce, 0x90, 0x0b, 0x3b, 0x67, 0x8e, 0xa7, 0x60, 0xa8, 0x0b, 0xeb, 0x8e, 0x48, + 0xf9, 0x01, 0x19, 0x2a, 0xbe, 0x4c, 0xc5, 0xf6, 0x69, 0xfc, 0x9d, 0x9c, 0xea, 0x4c, 0x94, 0xa6, + 0x21, 0x78, 0x96, 0x6a, 0x1d, 0x3e, 0x7f, 0x5d, 0x9f, 0x7b, 0xf1, 0xba, 0x3e, 0xf7, 0xea, 0x75, + 0x7d, 0xee, 0xdb, 0xa4, 0xae, 0x3d, 0x4f, 0xea, 0xda, 0x8b, 0xa4, 0xae, 0xbd, 0x4a, 0xea, 0xda, + 0xef, 0x49, 0x5d, 0xfb, 0xf1, 0x8f, 0xfa, 0xdc, 0xa3, 0x66, 0xc1, 0xbf, 0xfe, 0x7f, 0x07, 0x00, + 0x00, 0xff, 0xff, 0xdd, 0x9f, 0xa2, 0xd6, 0x2c, 0x10, 0x00, 0x00, } func (m *Amount) Marshal() (dAtA []byte, err error) { @@ -927,6 +959,18 @@ func (m *S3Artifact) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.CACertificate != nil { + { + size, err := m.CACertificate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } if len(m.Metadata) > 0 { keysForMetadata := make([]string, 0, len(m.Metadata)) for k := range m.Metadata { @@ -1147,6 +1191,47 @@ func (m *SASLConfig) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *SchemaRegistryConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SchemaRegistryConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SchemaRegistryConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Auth.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(m.SchemaID)) + i-- + dAtA[i] = 0x10 + i -= len(m.URL) + copy(dAtA[i:], m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *SecureHeader) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1244,21 +1329,14 @@ func (m *TLSConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - i -= len(m.DeprecatedClientKeyPath) - copy(dAtA[i:], m.DeprecatedClientKeyPath) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeprecatedClientKeyPath))) i-- - dAtA[i] = 0x32 - i -= len(m.DeprecatedClientCertPath) - copy(dAtA[i:], m.DeprecatedClientCertPath) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeprecatedClientCertPath))) - i-- - dAtA[i] = 0x2a - i -= len(m.DeprecatedCACertPath) - copy(dAtA[i:], m.DeprecatedCACertPath) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeprecatedCACertPath))) + if m.InsecureSkipVerify { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } i-- - dAtA[i] = 0x22 + dAtA[i] = 0x20 if m.ClientKeySecret != nil { { size, err := m.ClientKeySecret.MarshalToSizedBuffer(dAtA[:i]) @@ -1519,6 +1597,10 @@ func (m *S3Artifact) Size() (n int) { n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } + if m.CACertificate != nil { + l = m.CACertificate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -1567,6 +1649,20 @@ func (m *SASLConfig) Size() (n int) { return n } +func (m *SchemaRegistryConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.URL) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.SchemaID)) + l = m.Auth.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *SecureHeader) Size() (n int) { if m == nil { return 0 @@ -1615,12 +1711,7 @@ func (m *TLSConfig) Size() (n int) { l = m.ClientKeySecret.Size() n += 1 + l + sovGenerated(uint64(l)) } - l = len(m.DeprecatedCACertPath) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DeprecatedClientCertPath) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DeprecatedClientKeyPath) - n += 1 + l + sovGenerated(uint64(l)) + n += 2 return n } @@ -1772,6 +1863,7 @@ func (this *S3Artifact) String() string { `Events:` + fmt.Sprintf("%v", this.Events) + `,`, `Filter:` + strings.Replace(this.Filter.String(), "S3Filter", "S3Filter", 1) + `,`, `Metadata:` + mapStringForMetadata + `,`, + `CACertificate:` + strings.Replace(fmt.Sprintf("%v", this.CACertificate), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, `}`, }, "") return s @@ -1810,6 +1902,18 @@ func (this *SASLConfig) String() string { }, "") return s } +func (this *SchemaRegistryConfig) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&SchemaRegistryConfig{`, + `URL:` + fmt.Sprintf("%v", this.URL) + `,`, + `SchemaID:` + fmt.Sprintf("%v", this.SchemaID) + `,`, + `Auth:` + strings.Replace(strings.Replace(this.Auth.String(), "BasicAuth", "BasicAuth", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *SecureHeader) String() string { if this == nil { return "nil" @@ -1844,9 +1948,7 @@ func (this *TLSConfig) String() string { `CACertSecret:` + strings.Replace(fmt.Sprintf("%v", this.CACertSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, `ClientCertSecret:` + strings.Replace(fmt.Sprintf("%v", this.ClientCertSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, `ClientKeySecret:` + strings.Replace(fmt.Sprintf("%v", this.ClientKeySecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `DeprecatedCACertPath:` + fmt.Sprintf("%v", this.DeprecatedCACertPath) + `,`, - `DeprecatedClientCertPath:` + fmt.Sprintf("%v", this.DeprecatedClientCertPath) + `,`, - `DeprecatedClientKeyPath:` + fmt.Sprintf("%v", this.DeprecatedClientKeyPath) + `,`, + `InsecureSkipVerify:` + fmt.Sprintf("%v", this.InsecureSkipVerify) + `,`, `}`, }, "") return s @@ -3388,6 +3490,42 @@ func (m *S3Artifact) Unmarshal(dAtA []byte) error { } m.Metadata[mapkey] = mapvalue iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CACertificate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CACertificate == nil { + m.CACertificate = &v1.SecretKeySelector{} + } + if err := m.CACertificate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3791,6 +3929,140 @@ func (m *SASLConfig) Unmarshal(dAtA []byte) error { } return nil } +func (m *SchemaRegistryConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SchemaRegistryConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SchemaRegistryConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + 
} + m.URL = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SchemaID", wireType) + } + m.SchemaID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SchemaID |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Auth", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Auth.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *SecureHeader) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -4131,74 +4403,10 @@ func (m *TLSConfig) Unmarshal(dAtA []byte) error { } iNdEx = postIndex case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedCACertPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DeprecatedCACertPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedClientCertPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DeprecatedClientCertPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedClientKeyPath", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InsecureSkipVerify", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4208,24 +4416,12 @@ func (m *TLSConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= 
uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DeprecatedClientKeyPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.InsecureSkipVerify = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/pkg/apis/common/generated.proto b/pkg/apis/common/generated.proto index 46b88c911a..5936197ab9 100644 --- a/pkg/apis/common/generated.proto +++ b/pkg/apis/common/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,7 +16,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package github.com.argoproj.argo_events.pkg.apis.common; @@ -24,7 +24,7 @@ import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "common"; +option go_package = "github.com/argoproj/argo-events/pkg/apis/common"; // Amount represent a numeric amount. message Amount { @@ -122,6 +122,8 @@ message S3Artifact { optional S3Filter filter = 8; map metadata = 9; + + optional k8s.io.api.core.v1.SecretKeySelector caCertificate = 10; } // S3Bucket contains information to describe an S3 Bucket @@ -147,10 +149,23 @@ message SASLConfig { // User is the authentication identity (authcid) to present for // SASL/PLAIN or SASL/SCRAM authentication - optional k8s.io.api.core.v1.SecretKeySelector user = 2; + optional k8s.io.api.core.v1.SecretKeySelector userSecret = 2; // Password for SASL/PLAIN authentication - optional k8s.io.api.core.v1.SecretKeySelector password = 3; + optional k8s.io.api.core.v1.SecretKeySelector passwordSecret = 3; +} + +// SchemaRegistryConfig refers to configuration for a client +message SchemaRegistryConfig { + // Schema Registry URL. + optional string url = 1; + + // Schema ID + optional int32 schemaId = 2; + + // +optional + // SchemaRegistry - basic authentication + optional BasicAuth auth = 3; } // SecureHeader refers to HTTP Headers with auth tokens as values @@ -181,17 +196,9 @@ message TLSConfig { // ClientKeySecret refers to the secret that contains the client key optional k8s.io.api.core.v1.SecretKeySelector clientKeySecret = 3; - // DeprecatedCACertPath refers the file path that contains the CA cert. - // Deprecated: will be removed in v1.5, use CACertSecret instead - optional string caCertPath = 4; - - // DeprecatedClientCertPath refers the file path that contains client cert. - // Deprecated: will be removed in v1.5, use ClientCertSecret instead - optional string clientCertPath = 5; - - // DeprecatedClientKeyPath refers the file path that contains client key. - // Deprecated: will be removed in v1.5, use ClientKeySecret instead - optional string clientKeyPath = 6; + // If true, skips creation of TLSConfig with certs and creates an empty TLSConfig. 
(Defaults to false) + // +optional + optional bool insecureSkipVerify = 4; } // ValueFromSource allows you to reference keys from either a Configmap or Secret diff --git a/pkg/apis/common/openapi_generated.go b/pkg/apis/common/openapi_generated.go index f47e9e13d5..54ad14508c 100644 --- a/pkg/apis/common/openapi_generated.go +++ b/pkg/apis/common/openapi_generated.go @@ -1,7 +1,8 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -23,27 +24,28 @@ limitations under the License. package common import ( - spec "github.com/go-openapi/spec" common "k8s.io/kube-openapi/pkg/common" + spec "k8s.io/kube-openapi/pkg/validation/spec" ) func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { return map[string]common.OpenAPIDefinition{ - "github.com/argoproj/argo-events/pkg/apis/common.Amount": schema_argo_events_pkg_apis_common_Amount(ref), - "github.com/argoproj/argo-events/pkg/apis/common.Backoff": schema_argo_events_pkg_apis_common_Backoff(ref), - "github.com/argoproj/argo-events/pkg/apis/common.BasicAuth": schema_argo_events_pkg_apis_common_BasicAuth(ref), - "github.com/argoproj/argo-events/pkg/apis/common.Condition": schema_argo_events_pkg_apis_common_Condition(ref), - "github.com/argoproj/argo-events/pkg/apis/common.Int64OrString": schema_argo_events_pkg_apis_common_Int64OrString(ref), - "github.com/argoproj/argo-events/pkg/apis/common.Metadata": schema_argo_events_pkg_apis_common_Metadata(ref), - "github.com/argoproj/argo-events/pkg/apis/common.Resource": schema_argo_events_pkg_apis_common_Resource(ref), - "github.com/argoproj/argo-events/pkg/apis/common.S3Artifact": schema_argo_events_pkg_apis_common_S3Artifact(ref), - "github.com/argoproj/argo-events/pkg/apis/common.S3Bucket": schema_argo_events_pkg_apis_common_S3Bucket(ref), - "github.com/argoproj/argo-events/pkg/apis/common.S3Filter": schema_argo_events_pkg_apis_common_S3Filter(ref), - "github.com/argoproj/argo-events/pkg/apis/common.SASLConfig": schema_argo_events_pkg_apis_common_SASLConfig(ref), - "github.com/argoproj/argo-events/pkg/apis/common.SecureHeader": schema_argo_events_pkg_apis_common_SecureHeader(ref), - "github.com/argoproj/argo-events/pkg/apis/common.Status": schema_argo_events_pkg_apis_common_Status(ref), - "github.com/argoproj/argo-events/pkg/apis/common.TLSConfig": schema_argo_events_pkg_apis_common_TLSConfig(ref), - "github.com/argoproj/argo-events/pkg/apis/common.ValueFromSource": schema_argo_events_pkg_apis_common_ValueFromSource(ref), + "github.com/argoproj/argo-events/pkg/apis/common.Amount": schema_argo_events_pkg_apis_common_Amount(ref), + "github.com/argoproj/argo-events/pkg/apis/common.Backoff": schema_argo_events_pkg_apis_common_Backoff(ref), + "github.com/argoproj/argo-events/pkg/apis/common.BasicAuth": schema_argo_events_pkg_apis_common_BasicAuth(ref), + "github.com/argoproj/argo-events/pkg/apis/common.Condition": schema_argo_events_pkg_apis_common_Condition(ref), + "github.com/argoproj/argo-events/pkg/apis/common.Int64OrString": schema_argo_events_pkg_apis_common_Int64OrString(ref), + "github.com/argoproj/argo-events/pkg/apis/common.Metadata": schema_argo_events_pkg_apis_common_Metadata(ref), + "github.com/argoproj/argo-events/pkg/apis/common.Resource": schema_argo_events_pkg_apis_common_Resource(ref), + 
"github.com/argoproj/argo-events/pkg/apis/common.S3Artifact": schema_argo_events_pkg_apis_common_S3Artifact(ref), + "github.com/argoproj/argo-events/pkg/apis/common.S3Bucket": schema_argo_events_pkg_apis_common_S3Bucket(ref), + "github.com/argoproj/argo-events/pkg/apis/common.S3Filter": schema_argo_events_pkg_apis_common_S3Filter(ref), + "github.com/argoproj/argo-events/pkg/apis/common.SASLConfig": schema_argo_events_pkg_apis_common_SASLConfig(ref), + "github.com/argoproj/argo-events/pkg/apis/common.SchemaRegistryConfig": schema_argo_events_pkg_apis_common_SchemaRegistryConfig(ref), + "github.com/argoproj/argo-events/pkg/apis/common.SecureHeader": schema_argo_events_pkg_apis_common_SecureHeader(ref), + "github.com/argoproj/argo-events/pkg/apis/common.Status": schema_argo_events_pkg_apis_common_Status(ref), + "github.com/argoproj/argo-events/pkg/apis/common.TLSConfig": schema_argo_events_pkg_apis_common_TLSConfig(ref), + "github.com/argoproj/argo-events/pkg/apis/common.ValueFromSource": schema_argo_events_pkg_apis_common_ValueFromSource(ref), } } @@ -136,6 +138,7 @@ func schema_argo_events_pkg_apis_common_Condition(ref common.ReferenceCallback) "type": { SchemaProps: spec.SchemaProps{ Description: "Condition type.", + Default: "", Type: []string{"string"}, Format: "", }, @@ -143,6 +146,7 @@ func schema_argo_events_pkg_apis_common_Condition(ref common.ReferenceCallback) "status": { SchemaProps: spec.SchemaProps{ Description: "Condition status, True, False or Unknown.", + Default: "", Type: []string{"string"}, Format: "", }, @@ -201,8 +205,9 @@ func schema_argo_events_pkg_apis_common_Metadata(ref common.ReferenceCallback) c Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, @@ -215,8 +220,9 @@ func schema_argo_events_pkg_apis_common_Metadata(ref common.ReferenceCallback) c Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, @@ -249,8 +255,9 @@ func schema_argo_events_pkg_apis_common_S3Artifact(ref common.ReferenceCallback) Properties: map[string]spec.Schema{ "endpoint": { SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: "", + Type: []string{"string"}, + Format: "", }, }, "bucket": { @@ -286,8 +293,9 @@ func schema_argo_events_pkg_apis_common_S3Artifact(ref common.ReferenceCallback) Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, @@ -305,13 +313,19 @@ func schema_argo_events_pkg_apis_common_S3Artifact(ref common.ReferenceCallback) Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, }, }, + "caCertificate": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, }, Required: []string{"endpoint", "bucket", "accessKey", "secretKey"}, }, @@ -336,8 +350,9 @@ func schema_argo_events_pkg_apis_common_S3Bucket(ref common.ReferenceCallback) c }, "name": { SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, @@ -356,14 +371,16 @@ func schema_argo_events_pkg_apis_common_S3Filter(ref common.ReferenceCallback) c Properties: map[string]spec.Schema{ 
"prefix": { SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: "", + Type: []string{"string"}, + Format: "", }, }, "suffix": { SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, @@ -407,6 +424,45 @@ func schema_argo_events_pkg_apis_common_SASLConfig(ref common.ReferenceCallback) } } +func schema_argo_events_pkg_apis_common_SchemaRegistryConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "SchemaRegistryConfig refers to configuration for a client", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "url": { + SchemaProps: spec.SchemaProps{ + Description: "Schema Registry URL.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "schemaId": { + SchemaProps: spec.SchemaProps{ + Description: "Schema ID", + Default: 0, + Type: []string{"integer"}, + Format: "int32", + }, + }, + "auth": { + SchemaProps: spec.SchemaProps{ + Description: "SchemaRegistry - basic authentication", + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/common.BasicAuth"), + }, + }, + }, + Required: []string{"url", "schemaId"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-events/pkg/apis/common.BasicAuth"}, + } +} + func schema_argo_events_pkg_apis_common_SecureHeader(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -454,7 +510,8 @@ func schema_argo_events_pkg_apis_common_Status(ref common.ReferenceCallback) com Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/common.Condition"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/common.Condition"), }, }, }, @@ -493,24 +550,10 @@ func schema_argo_events_pkg_apis_common_TLSConfig(ref common.ReferenceCallback) Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), }, }, - "caCertPath": { - SchemaProps: spec.SchemaProps{ - Description: "DeprecatedCACertPath refers the file path that contains the CA cert. Deprecated: will be removed in v1.5, use CACertSecret instead", - Type: []string{"string"}, - Format: "", - }, - }, - "clientCertPath": { + "insecureSkipVerify": { SchemaProps: spec.SchemaProps{ - Description: "DeprecatedClientCertPath refers the file path that contains client cert. Deprecated: will be removed in v1.5, use ClientCertSecret instead", - Type: []string{"string"}, - Format: "", - }, - }, - "clientKeyPath": { - SchemaProps: spec.SchemaProps{ - Description: "DeprecatedClientKeyPath refers the file path that contains client key. Deprecated: will be removed in v1.5, use ClientKeySecret instead", - Type: []string{"string"}, + Description: "If true, skips creation of TLSConfig with certs and creates an empty TLSConfig. 
(Defaults to false)", + Type: []string{"boolean"}, Format: "", }, }, diff --git a/pkg/apis/common/s3.go b/pkg/apis/common/s3.go index 4199ccdcf2..8af4726a54 100644 --- a/pkg/apis/common/s3.go +++ b/pkg/apis/common/s3.go @@ -32,6 +32,8 @@ type S3Artifact struct { Events []string `json:"events,omitempty" protobuf:"bytes,7,rep,name=events"` Filter *S3Filter `json:"filter,omitempty" protobuf:"bytes,8,opt,name=filter"` Metadata map[string]string `json:"metadata,omitempty" protobuf:"bytes,9,opt,name=metadata"` + + CACertificate *corev1.SecretKeySelector `json:"caCertificate,omitempty" protobuf:"bytes,10,opt,name=caCertificate"` } // S3Bucket contains information to describe an S3 Bucket diff --git a/pkg/apis/common/validate.go b/pkg/apis/common/validate.go index 7655e04b0e..106af5dffc 100644 --- a/pkg/apis/common/validate.go +++ b/pkg/apis/common/validate.go @@ -9,17 +9,22 @@ func ValidateTLSConfig(tlsConfig *TLSConfig) error { if tlsConfig == nil { return nil } + + if tlsConfig.InsecureSkipVerify { + return nil + } + var caCertSet, clientCertSet, clientKeySet bool - if tlsConfig.CACertSecret != nil || tlsConfig.DeprecatedCACertPath != "" { + if tlsConfig.CACertSecret != nil { caCertSet = true } - if tlsConfig.ClientCertSecret != nil || tlsConfig.DeprecatedClientCertPath != "" { + if tlsConfig.ClientCertSecret != nil { clientCertSet = true } - if tlsConfig.ClientKeySecret != nil || tlsConfig.DeprecatedClientKeyPath != "" { + if tlsConfig.ClientKeySecret != nil { clientKeySet = true } diff --git a/pkg/apis/common/validate_test.go b/pkg/apis/common/validate_test.go index cf00f53e39..7146cbefab 100644 --- a/pkg/apis/common/validate_test.go +++ b/pkg/apis/common/validate_test.go @@ -8,27 +8,31 @@ import ( corev1 "k8s.io/api/core/v1" ) -func fakeTLSConfig(t *testing.T) *TLSConfig { +func fakeTLSConfig(t *testing.T, insecureSkipVerify bool) *TLSConfig { t.Helper() - return &TLSConfig{ - CACertSecret: &corev1.SecretKeySelector{ - Key: "fake-key1", - LocalObjectReference: corev1.LocalObjectReference{ - Name: "fake-name1", + if insecureSkipVerify == true { + return &TLSConfig{InsecureSkipVerify: true} + } else { + return &TLSConfig{ + CACertSecret: &corev1.SecretKeySelector{ + Key: "fake-key1", + LocalObjectReference: corev1.LocalObjectReference{ + Name: "fake-name1", + }, }, - }, - ClientCertSecret: &corev1.SecretKeySelector{ - Key: "fake-key2", - LocalObjectReference: corev1.LocalObjectReference{ - Name: "fake-name2", + ClientCertSecret: &corev1.SecretKeySelector{ + Key: "fake-key2", + LocalObjectReference: corev1.LocalObjectReference{ + Name: "fake-name2", + }, }, - }, - ClientKeySecret: &corev1.SecretKeySelector{ - Key: "fake-key3", - LocalObjectReference: corev1.LocalObjectReference{ - Name: "fake-name3", + ClientKeySecret: &corev1.SecretKeySelector{ + Key: "fake-key3", + LocalObjectReference: corev1.LocalObjectReference{ + Name: "fake-name3", + }, }, - }, + } } } @@ -60,8 +64,14 @@ func TestValidateTLSConfig(t *testing.T) { assert.True(t, strings.Contains(err.Error(), "please configure either caCertSecret, or clientCertSecret and clientKeySecret, or both")) }) + t.Run("test insecureSkipVerify true", func(t *testing.T) { + c := &TLSConfig{InsecureSkipVerify: true} + err := ValidateTLSConfig(c) + assert.Nil(t, err) + }) + t.Run("test clientKeySecret is set, clientCertSecret is empty", func(t *testing.T) { - c := fakeTLSConfig(t) + c := fakeTLSConfig(t, false) c.CACertSecret = nil c.ClientCertSecret = nil err := ValidateTLSConfig(c) @@ -70,7 +80,7 @@ func TestValidateTLSConfig(t *testing.T) { }) 
t.Run("test only caCertSecret is set", func(t *testing.T) { - c := fakeTLSConfig(t) + c := fakeTLSConfig(t, false) c.ClientCertSecret = nil c.ClientKeySecret = nil err := ValidateTLSConfig(c) @@ -78,14 +88,14 @@ func TestValidateTLSConfig(t *testing.T) { }) t.Run("test clientCertSecret and clientKeySecret are set", func(t *testing.T) { - c := fakeTLSConfig(t) + c := fakeTLSConfig(t, false) c.CACertSecret = nil err := ValidateTLSConfig(c) assert.Nil(t, err) }) t.Run("test all of 3 are set", func(t *testing.T) { - c := fakeTLSConfig(t) + c := fakeTLSConfig(t, false) err := ValidateTLSConfig(c) assert.Nil(t, err) }) diff --git a/pkg/apis/eventbus/v1alpha1/container_template.go b/pkg/apis/eventbus/v1alpha1/container_template.go new file mode 100644 index 0000000000..0dac09f046 --- /dev/null +++ b/pkg/apis/eventbus/v1alpha1/container_template.go @@ -0,0 +1,10 @@ +package v1alpha1 + +import corev1 "k8s.io/api/core/v1" + +// ContainerTemplate defines customized spec for a container +type ContainerTemplate struct { + Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,1,opt,name=resources"` + ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,2,opt,name=imagePullPolicy,casttype=PullPolicy"` + SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,3,opt,name=securityContext"` +} diff --git a/pkg/apis/eventbus/v1alpha1/eventbus_types.go b/pkg/apis/eventbus/v1alpha1/eventbus_types.go new file mode 100644 index 0000000000..96259d1872 --- /dev/null +++ b/pkg/apis/eventbus/v1alpha1/eventbus_types.go @@ -0,0 +1,101 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/argoproj/argo-events/pkg/apis/common" +) + +// EventBus is the definition of a eventbus resource +// +genclient +// +kubebuilder:resource:singular=eventbus,shortName=eb +// +kubebuilder:subresource:status +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=true +type EventBus struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + Spec EventBusSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + // +optional + Status EventBusStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// EventBusList is the list of eventbus resources +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type EventBusList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + + Items []EventBus `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// EventBusSpec refers to specification of eventbus resource +type EventBusSpec struct { + // NATS eventbus + // +optional + NATS *NATSBus `json:"nats,omitempty" protobuf:"bytes,1,opt,name=nats"` + // +optional + JetStream *JetStreamBus `json:"jetstream,omitempty" protobuf:"bytes,2,opt,name=jetstream"` + // +optional + // Kafka eventbus + Kafka *KafkaBus `json:"kafka,omitempty" protobuf:"bytes,3,opt,name=kafka"` + // Exotic JetStream + // +optional + JetStreamExotic *JetStreamConfig `json:"jetstreamExotic,omitempty" protobuf:"bytes,4,opt,name=jetstreamExotic"` +} + +// EventBusStatus holds the status of the eventbus resource +type EventBusStatus struct { + common.Status `json:",inline" protobuf:"bytes,1,opt,name=status"` + // Config holds the fininalized configuration of EventBus + Config BusConfig `json:"config,omitempty" protobuf:"bytes,2,opt,name=config"` +} + +// BusConfig 
+ +// BusConfig has the finalized configuration for EventBus +type BusConfig struct { + // +optional + NATS *NATSConfig `json:"nats,omitempty" protobuf:"bytes,1,opt,name=nats"` + // +optional + JetStream *JetStreamConfig `json:"jetstream,omitempty" protobuf:"bytes,2,opt,name=jetstream"` + // +optional + Kafka *KafkaBus `json:"kafka,omitempty" protobuf:"bytes,3,opt,name=kafka"` +} + +const ( + // EventBusConditionDeployed has the status True when the EventBus + // has its StatefulSet/Deployment and service created. + EventBusConditionDeployed common.ConditionType = "Deployed" + // EventBusConditionConfigured has the status True when the EventBus + // has its configuration ready. + EventBusConditionConfigured common.ConditionType = "Configured" +) + +// InitConditions sets conditions to Unknown state. +func (s *EventBusStatus) InitConditions() { + s.InitializeConditions(EventBusConditionDeployed, EventBusConditionConfigured) +} + +// MarkDeployed sets the bus as deployed. +func (s *EventBusStatus) MarkDeployed(reason, message string) { + s.MarkTrueWithReason(EventBusConditionDeployed, reason, message) +} + +// MarkDeploying sets the bus as deploying. +func (s *EventBusStatus) MarkDeploying(reason, message string) { + s.MarkUnknown(EventBusConditionDeployed, reason, message) +} + +// MarkDeployFailed sets the bus deployment as failed. +func (s *EventBusStatus) MarkDeployFailed(reason, message string) { + s.MarkFalse(EventBusConditionDeployed, reason, message) +} + +// MarkConfigured sets the bus configuration as done. +func (s *EventBusStatus) MarkConfigured() { + s.MarkTrue(EventBusConditionConfigured) +} + +// MarkNotConfigured sets the bus status as not configured. +func (s *EventBusStatus) MarkNotConfigured(reason, message string) { + s.MarkFalse(EventBusConditionConfigured, reason, message) +} diff --git a/pkg/apis/eventbus/v1alpha1/types_test.go b/pkg/apis/eventbus/v1alpha1/eventbus_types_test.go similarity index 100% rename from pkg/apis/eventbus/v1alpha1/types_test.go rename to pkg/apis/eventbus/v1alpha1/eventbus_types_test.go diff --git a/pkg/apis/eventbus/v1alpha1/generated.pb.go b/pkg/apis/eventbus/v1alpha1/generated.pb.go index c6dab36e7d..13c7289e33 100644 --- a/pkg/apis/eventbus/v1alpha1/generated.pb.go +++ b/pkg/apis/eventbus/v1alpha1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
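The Mark* helpers above give the controller a small state machine over the Deployed and Configured conditions. A minimal usage sketch, assuming the embedded common.Status exposes a Conditions slice whose entries carry Type and Status fields (as the OpenAPI schema earlier in this diff suggests); the reason and message strings are illustrative:

```go
package main

import (
	"fmt"

	"github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1"
)

func main() {
	status := &v1alpha1.EventBusStatus{}
	status.InitConditions() // Deployed and Configured both start as Unknown

	// A reconciler would advance the conditions as work completes:
	status.MarkDeploying("Progressing", "creating StatefulSet and service")
	status.MarkDeployed("Deployed", "StatefulSet and service are ready")
	status.MarkConfigured()

	for _, c := range status.Conditions {
		fmt.Printf("%s=%s\n", c.Type, c.Status)
	}
}
```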
@@ -216,10 +216,122 @@ func (m *EventBusStatus) XXX_DiscardUnknown() { var xxx_messageInfo_EventBusStatus proto.InternalMessageInfo +func (m *JetStreamBus) Reset() { *m = JetStreamBus{} } +func (*JetStreamBus) ProtoMessage() {} +func (*JetStreamBus) Descriptor() ([]byte, []int) { + return fileDescriptor_871e47633eb7aad4, []int{6} +} +func (m *JetStreamBus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JetStreamBus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JetStreamBus) XXX_Merge(src proto.Message) { + xxx_messageInfo_JetStreamBus.Merge(m, src) +} +func (m *JetStreamBus) XXX_Size() int { + return m.Size() +} +func (m *JetStreamBus) XXX_DiscardUnknown() { + xxx_messageInfo_JetStreamBus.DiscardUnknown(m) +} + +var xxx_messageInfo_JetStreamBus proto.InternalMessageInfo + +func (m *JetStreamConfig) Reset() { *m = JetStreamConfig{} } +func (*JetStreamConfig) ProtoMessage() {} +func (*JetStreamConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_871e47633eb7aad4, []int{7} +} +func (m *JetStreamConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JetStreamConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JetStreamConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_JetStreamConfig.Merge(m, src) +} +func (m *JetStreamConfig) XXX_Size() int { + return m.Size() +} +func (m *JetStreamConfig) XXX_DiscardUnknown() { + xxx_messageInfo_JetStreamConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_JetStreamConfig proto.InternalMessageInfo + +func (m *KafkaBus) Reset() { *m = KafkaBus{} } +func (*KafkaBus) ProtoMessage() {} +func (*KafkaBus) Descriptor() ([]byte, []int) { + return fileDescriptor_871e47633eb7aad4, []int{8} +} +func (m *KafkaBus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KafkaBus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *KafkaBus) XXX_Merge(src proto.Message) { + xxx_messageInfo_KafkaBus.Merge(m, src) +} +func (m *KafkaBus) XXX_Size() int { + return m.Size() +} +func (m *KafkaBus) XXX_DiscardUnknown() { + xxx_messageInfo_KafkaBus.DiscardUnknown(m) +} + +var xxx_messageInfo_KafkaBus proto.InternalMessageInfo + +func (m *KafkaConsumerGroup) Reset() { *m = KafkaConsumerGroup{} } +func (*KafkaConsumerGroup) ProtoMessage() {} +func (*KafkaConsumerGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_871e47633eb7aad4, []int{9} +} +func (m *KafkaConsumerGroup) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KafkaConsumerGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *KafkaConsumerGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_KafkaConsumerGroup.Merge(m, src) +} +func (m *KafkaConsumerGroup) XXX_Size() int { + return m.Size() +} +func (m *KafkaConsumerGroup) XXX_DiscardUnknown() { + xxx_messageInfo_KafkaConsumerGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_KafkaConsumerGroup proto.InternalMessageInfo + func (m *NATSBus) Reset() { *m = NATSBus{} } func (*NATSBus) ProtoMessage() {} func (*NATSBus) 
Descriptor() ([]byte, []int) { - return fileDescriptor_871e47633eb7aad4, []int{6} + return fileDescriptor_871e47633eb7aad4, []int{10} } func (m *NATSBus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -247,7 +359,7 @@ var xxx_messageInfo_NATSBus proto.InternalMessageInfo func (m *NATSConfig) Reset() { *m = NATSConfig{} } func (*NATSConfig) ProtoMessage() {} func (*NATSConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_871e47633eb7aad4, []int{7} + return fileDescriptor_871e47633eb7aad4, []int{11} } func (m *NATSConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -275,7 +387,7 @@ var xxx_messageInfo_NATSConfig proto.InternalMessageInfo func (m *NativeStrategy) Reset() { *m = NativeStrategy{} } func (*NativeStrategy) ProtoMessage() {} func (*NativeStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_871e47633eb7aad4, []int{8} + return fileDescriptor_871e47633eb7aad4, []int{12} } func (m *NativeStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -303,7 +415,7 @@ var xxx_messageInfo_NativeStrategy proto.InternalMessageInfo func (m *PersistenceStrategy) Reset() { *m = PersistenceStrategy{} } func (*PersistenceStrategy) ProtoMessage() {} func (*PersistenceStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_871e47633eb7aad4, []int{9} + return fileDescriptor_871e47633eb7aad4, []int{13} } func (m *PersistenceStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -335,6 +447,11 @@ func init() { proto.RegisterType((*EventBusList)(nil), "github.com.argoproj.argo_events.pkg.apis.eventbus.v1alpha1.EventBusList") proto.RegisterType((*EventBusSpec)(nil), "github.com.argoproj.argo_events.pkg.apis.eventbus.v1alpha1.EventBusSpec") proto.RegisterType((*EventBusStatus)(nil), "github.com.argoproj.argo_events.pkg.apis.eventbus.v1alpha1.EventBusStatus") + proto.RegisterType((*JetStreamBus)(nil), "github.com.argoproj.argo_events.pkg.apis.eventbus.v1alpha1.JetStreamBus") + proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_events.pkg.apis.eventbus.v1alpha1.JetStreamBus.NodeSelectorEntry") + proto.RegisterType((*JetStreamConfig)(nil), "github.com.argoproj.argo_events.pkg.apis.eventbus.v1alpha1.JetStreamConfig") + proto.RegisterType((*KafkaBus)(nil), "github.com.argoproj.argo_events.pkg.apis.eventbus.v1alpha1.KafkaBus") + proto.RegisterType((*KafkaConsumerGroup)(nil), "github.com.argoproj.argo_events.pkg.apis.eventbus.v1alpha1.KafkaConsumerGroup") proto.RegisterType((*NATSBus)(nil), "github.com.argoproj.argo_events.pkg.apis.eventbus.v1alpha1.NATSBus") proto.RegisterType((*NATSConfig)(nil), "github.com.argoproj.argo_events.pkg.apis.eventbus.v1alpha1.NATSConfig") proto.RegisterType((*NativeStrategy)(nil), "github.com.argoproj.argo_events.pkg.apis.eventbus.v1alpha1.NativeStrategy") @@ -347,94 +464,135 @@ func init() { } var fileDescriptor_871e47633eb7aad4 = []byte{ - // 1386 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xdf, 0x6f, 0x1b, 0xc5, - 0x13, 0xcf, 0x25, 0x8e, 0x63, 0xaf, 0xdd, 0xc4, 0xde, 0xf6, 0xfb, 0xd5, 0x11, 0x15, 0xbb, 0xb2, - 0x54, 0x14, 0x89, 0xf6, 0x4c, 0x2b, 0x04, 0xa5, 0x2f, 0xc5, 0x97, 0xa6, 0xd0, 0x52, 0xa7, 0x61, - 0xdd, 0x56, 0x02, 0x2a, 0xca, 0xe6, 0x32, 0x71, 0x2e, 0xf1, 0xdd, 0xb9, 0xb7, 0x7b, 0x56, 0xcc, - 0x13, 0xe2, 0x2f, 0x40, 0x3c, 0x20, 0xfe, 0x03, 0xfe, 0x95, 0x3e, 0xf0, 0xd0, 0x37, 0xfa, 0x82, - 0xd5, 0xba, 0xe2, 0x9f, 0xe8, 0x13, 0xda, 0xbd, 0xbd, 0x1f, 0xf6, 0x39, 0x25, 0x25, 0xe1, 0xc9, - 0xb7, 0xb3, 0x33, 
0x9f, 0xcf, 0xcc, 0xec, 0xce, 0xcc, 0x1a, 0xdd, 0xe9, 0xda, 0x7c, 0x2f, 0xd8, - 0x36, 0x2c, 0xcf, 0x69, 0x52, 0xbf, 0xeb, 0xf5, 0x7d, 0x6f, 0x5f, 0x7e, 0x5c, 0x86, 0x01, 0xb8, - 0x9c, 0x35, 0xfb, 0x07, 0xdd, 0x26, 0xed, 0xdb, 0xac, 0x29, 0xd7, 0xdb, 0x01, 0x6b, 0x0e, 0xae, - 0xd0, 0x5e, 0x7f, 0x8f, 0x5e, 0x69, 0x76, 0xc1, 0x05, 0x9f, 0x72, 0xd8, 0x31, 0xfa, 0xbe, 0xc7, - 0x3d, 0x7c, 0x3d, 0xc1, 0x32, 0x22, 0x2c, 0xf9, 0xf1, 0x38, 0xc4, 0x32, 0xfa, 0x07, 0x5d, 0x43, - 0x60, 0x19, 0x11, 0x96, 0x11, 0x61, 0xad, 0xde, 0x38, 0xb6, 0x1f, 0x96, 0xe7, 0x38, 0x9e, 0x3b, - 0x4d, 0xbe, 0x7a, 0x39, 0x05, 0xd0, 0xf5, 0xba, 0x5e, 0x53, 0x8a, 0xb7, 0x83, 0x5d, 0xb9, 0x92, - 0x0b, 0xf9, 0xa5, 0xd4, 0x1b, 0x07, 0xd7, 0x98, 0x61, 0x7b, 0x02, 0xb2, 0x69, 0x79, 0x3e, 0x34, - 0x07, 0x99, 0x78, 0x56, 0x3f, 0x4c, 0x74, 0x1c, 0x6a, 0xed, 0xd9, 0x2e, 0xf8, 0xc3, 0xc8, 0x8f, - 0xa6, 0x0f, 0xcc, 0x0b, 0x7c, 0x0b, 0xde, 0xca, 0x8a, 0x35, 0x1d, 0xe0, 0x74, 0x16, 0x57, 0xf3, - 0x28, 0x2b, 0x3f, 0x70, 0xb9, 0xed, 0x64, 0x69, 0x3e, 0xfa, 0x27, 0x03, 0x66, 0xed, 0x81, 0x43, - 0xa7, 0xed, 0x1a, 0x4f, 0x50, 0xd1, 0x0c, 0xd8, 0xba, 0xe7, 0xee, 0xda, 0x5d, 0xbc, 0x83, 0x72, - 0x2e, 0xe5, 0x4c, 0xd7, 0x2e, 0x68, 0x6b, 0xa5, 0xab, 0xb7, 0x8c, 0x7f, 0x7f, 0x80, 0xc6, 0x66, - 0xeb, 0x7e, 0x27, 0x44, 0x35, 0x0b, 0xe3, 0x51, 0x3d, 0x27, 0xd6, 0x44, 0xa2, 0x37, 0x5c, 0x54, - 0x5d, 0xf7, 0x5c, 0x4e, 0x85, 0x8f, 0xf7, 0xc1, 0xe9, 0xf7, 0x28, 0x07, 0xfc, 0x15, 0x2a, 0x46, - 0x29, 0x8c, 0xf8, 0xd7, 0x8c, 0x30, 0x26, 0x41, 0x61, 0x88, 0x43, 0x31, 0x06, 0x57, 0x0c, 0xa2, - 0x94, 0x08, 0x3c, 0x09, 0x6c, 0x1f, 0x1c, 0xe1, 0x87, 0x59, 0x7d, 0x3a, 0xaa, 0xcf, 0x8d, 0x47, - 0xf5, 0x62, 0xb4, 0xcb, 0x48, 0x82, 0xd6, 0xf8, 0x7d, 0x1e, 0x15, 0x36, 0x84, 0x83, 0x66, 0xc0, - 0xf0, 0x77, 0xa8, 0x20, 0x72, 0xbe, 0x43, 0x39, 0x55, 0x34, 0x1f, 0xa4, 0x68, 0xe2, 0xd4, 0x25, - 0xa1, 0x09, 0x6d, 0x41, 0x7c, 0x6f, 0x7b, 0x1f, 0x2c, 0xde, 0x06, 0x4e, 0x4d, 0xac, 0xe8, 0x50, - 0x22, 0x23, 0x31, 0x2a, 0xde, 0x47, 0x39, 0xd6, 0x07, 0x4b, 0x9f, 0x97, 0xe8, 0x9f, 0x9f, 0x24, - 0x89, 0x91, 0xd7, 0x9d, 0x3e, 0x58, 0x66, 0x59, 0xb1, 0xe6, 0xc4, 0x8a, 0x48, 0x0e, 0xec, 0xa3, - 0x3c, 0xe3, 0x94, 0x07, 0x4c, 0x5f, 0x90, 0x6c, 0x77, 0x4e, 0x85, 0x4d, 0x22, 0x9a, 0xcb, 0x8a, - 0x2f, 0x1f, 0xae, 0x89, 0x62, 0x6a, 0xfc, 0xa1, 0xa1, 0x72, 0xa4, 0x7a, 0xd7, 0x66, 0x1c, 0x3f, - 0xca, 0xa4, 0xd4, 0x38, 0x5e, 0x4a, 0x85, 0xb5, 0x4c, 0x68, 0x45, 0x51, 0x15, 0x22, 0x49, 0x2a, - 0x9d, 0x36, 0x5a, 0xb4, 0x39, 0x38, 0x4c, 0x9f, 0xbf, 0xb0, 0xb0, 0x56, 0xba, 0x7a, 0xf3, 0x34, - 0x22, 0x34, 0xcf, 0x28, 0xc2, 0xc5, 0xdb, 0x02, 0x9a, 0x84, 0x0c, 0x8d, 0x27, 0x49, 0x60, 0x22, - 0xc7, 0x98, 0x4e, 0x94, 0xc3, 0xfa, 0x49, 0xcb, 0x41, 0x10, 0x4f, 0xd7, 0xc2, 0x0b, 0x0d, 0x2d, - 0x4f, 0xe6, 0x1d, 0x3f, 0x8e, 0xcf, 0x34, 0xe4, 0xfd, 0xf8, 0xf8, 0xbc, 0x61, 0x2f, 0x34, 0xde, - 0x7c, 0x80, 0xd8, 0x41, 0x79, 0x4b, 0x56, 0xa6, 0xba, 0xa2, 0x1b, 0x27, 0x09, 0x2c, 0x6e, 0x1e, - 0x09, 0x5d, 0xb8, 0x26, 0x8a, 0xa4, 0xf1, 0x97, 0x86, 0x96, 0x54, 0xf8, 0xd8, 0x45, 0x79, 0x97, - 0x72, 0x7b, 0x00, 0x2a, 0xb6, 0x13, 0xdd, 0xd7, 0x4d, 0x89, 0xd4, 0xe1, 0xa2, 0x9d, 0x75, 0x87, - 0x26, 0x12, 0xdc, 0xa1, 0x8c, 0x28, 0x16, 0xbc, 0x8f, 0xf2, 0x70, 0xe8, 0x71, 0x3b, 0xaa, 0xc6, - 0xd3, 0x6a, 0x69, 0x92, 0x6b, 0x43, 0x22, 0x13, 0xc5, 0xd0, 0x78, 0xa5, 0x21, 0x94, 0xa8, 0xe0, - 0x77, 0xd1, 0x42, 0xe0, 0xf7, 0x64, 0x9c, 0x45, 0xb3, 0xa4, 0x72, 0xb3, 0xf0, 0x80, 0xdc, 0x25, - 0x42, 0x8e, 0xdf, 0x47, 0x45, 0xab, 0x17, 0x30, 0x0e, 0xfe, 0xed, 0x9b, 0xd2, 0xb9, 0xa2, 0x79, - 0x46, 0x74, 0xb0, 0xf5, 0x48, 0x48, 0x92, 
0x7d, 0x7c, 0x09, 0xe5, 0x68, 0xc0, 0xf7, 0x64, 0x91, - 0x17, 0x4d, 0x5d, 0xdc, 0xa1, 0x56, 0xc0, 0xf7, 0x5e, 0x8f, 0xea, 0x65, 0xf1, 0x1b, 0xa5, 0x80, - 0x48, 0x2d, 0xfc, 0x0d, 0x2a, 0x53, 0xcb, 0x02, 0xc6, 0x3a, 0x60, 0xf9, 0xc0, 0xf5, 0x9c, 0x0c, - 0xfd, 0xe2, 0xac, 0x6e, 0x1a, 0x6a, 0x7c, 0x01, 0xc3, 0x0e, 0xf4, 0xc0, 0xe2, 0x9e, 0x6f, 0x56, - 0xc6, 0x02, 0x34, 0x65, 0x4e, 0x26, 0xc0, 0x1a, 0x7f, 0x96, 0xd1, 0xf2, 0x64, 0xe2, 0xf1, 0x25, - 0x54, 0xf0, 0xa1, 0xdf, 0xb3, 0x2d, 0x1a, 0x5e, 0xd9, 0xc5, 0xa4, 0x9e, 0x89, 0x92, 0x93, 0x58, - 0x23, 0x8e, 0x65, 0xfe, 0x58, 0xb1, 0x98, 0xa8, 0x4c, 0x5d, 0x6e, 0xb7, 0x76, 0x77, 0x6d, 0xd7, - 0xe6, 0x43, 0x99, 0x81, 0x82, 0x59, 0x53, 0xf8, 0xff, 0xbf, 0x09, 0x7d, 0x1f, 0x2c, 0x31, 0xce, - 0x5a, 0x29, 0x2d, 0x32, 0x61, 0x83, 0x7f, 0xd4, 0x50, 0xa9, 0x0f, 0x3e, 0xb3, 0x19, 0x07, 0xd7, - 0x02, 0x95, 0x8f, 0x7b, 0x27, 0xb9, 0x0a, 0x5b, 0x09, 0x5c, 0x7c, 0xff, 0x56, 0xc6, 0xa3, 0x7a, - 0x29, 0xb5, 0x41, 0xd2, 0xa4, 0xf8, 0x67, 0x0d, 0x55, 0xad, 0xe9, 0xa9, 0xa7, 0x2f, 0x4a, 0x57, - 0xda, 0x27, 0x71, 0x25, 0x33, 0x4a, 0xcd, 0xff, 0x8d, 0x47, 0xf5, 0xec, 0x84, 0x25, 0x59, 0x7a, - 0xfc, 0x9b, 0x86, 0x74, 0x07, 0xb8, 0x6f, 0x5b, 0x2c, 0xa3, 0xaf, 0xe7, 0xff, 0x0b, 0xdf, 0xce, - 0x8f, 0x47, 0x75, 0xbd, 0x7d, 0x04, 0x25, 0x39, 0xd2, 0x19, 0xfc, 0x8b, 0x86, 0xca, 0xae, 0xb7, - 0x03, 0xd1, 0x3d, 0xd5, 0x97, 0xe4, 0x34, 0x78, 0x74, 0x7a, 0xfd, 0xc3, 0xd8, 0x4c, 0xc1, 0x6f, - 0xb8, 0xdc, 0x1f, 0x9a, 0xe7, 0xd4, 0x35, 0x2b, 0xa7, 0xb7, 0xc8, 0x84, 0x1f, 0xf8, 0x01, 0x2a, - 0x71, 0xaf, 0x27, 0x9e, 0x54, 0xb6, 0xe7, 0x32, 0xbd, 0x20, 0xdd, 0xaa, 0xcd, 0xaa, 0xb5, 0xfb, - 0xb1, 0x9a, 0x79, 0x56, 0x01, 0x97, 0x12, 0x19, 0x23, 0x69, 0x1c, 0x6c, 0xa5, 0x66, 0x6a, 0x51, - 0x1e, 0xc4, 0x27, 0x6f, 0x3d, 0x06, 0xda, 0x0a, 0xc0, 0x2c, 0x8b, 0x52, 0x8c, 0x56, 0xa9, 0xd1, - 0x0a, 0x68, 0x85, 0x81, 0x15, 0xf8, 0x36, 0x1f, 0x8a, 0x8c, 0xc3, 0x21, 0xd7, 0x91, 0xe4, 0x7a, - 0x6f, 0x96, 0xff, 0x5b, 0xde, 0x4e, 0x67, 0x52, 0xdb, 0x3c, 0x3b, 0x1e, 0xd5, 0x57, 0xa6, 0x84, - 0x64, 0x1a, 0x13, 0x37, 0x50, 0xde, 0xa1, 0x87, 0xad, 0x2e, 0xe8, 0x25, 0x59, 0xf3, 0xb2, 0x79, - 0xb6, 0xa5, 0x84, 0xa8, 0x1d, 0xec, 0xa2, 0x8a, 0xed, 0xd0, 0x2e, 0x6c, 0x05, 0xbd, 0x5e, 0xd8, - 0x69, 0x98, 0x5e, 0x96, 0xb9, 0x9c, 0xf9, 0x0a, 0xbc, 0xeb, 0x59, 0xb4, 0x17, 0xbe, 0xbe, 0x08, - 0xec, 0x82, 0x2f, 0x4a, 0xcc, 0xd4, 0x55, 0x56, 0x2b, 0xb7, 0xa7, 0x90, 0x48, 0x06, 0x1b, 0xdf, - 0x41, 0x98, 0x81, 0x3f, 0xb0, 0x2d, 0x68, 0x59, 0x96, 0x17, 0xb8, 0x7c, 0x93, 0x3a, 0xa0, 0x9f, - 0x91, 0xfe, 0xad, 0x2a, 0x1c, 0xdc, 0xc9, 0x68, 0x90, 0x19, 0x56, 0xf8, 0x33, 0x54, 0xed, 0xfb, - 0xb6, 0x27, 0x43, 0xee, 0x51, 0xc6, 0x24, 0xd4, 0xb2, 0x84, 0x7a, 0x47, 0x41, 0x55, 0xb7, 0xa6, - 0x15, 0x48, 0xd6, 0x06, 0xaf, 0xa1, 0x42, 0x24, 0xd4, 0x57, 0x64, 0x23, 0x95, 0x27, 0x17, 0xd9, - 0x92, 0x78, 0x17, 0xdf, 0x42, 0x05, 0x1a, 0xb5, 0xc4, 0x8a, 0x3c, 0xb2, 0xf3, 0xb3, 0xd2, 0x14, - 0xb5, 0xc0, 0x10, 0x27, 0x6e, 0x8f, 0xb1, 0x2d, 0xbe, 0x88, 0x96, 0x1c, 0x7a, 0xd8, 0x66, 0x5d, - 0xa6, 0x57, 0x2f, 0x68, 0x6b, 0x39, 0xb3, 0x34, 0x1e, 0xd5, 0x97, 0xda, 0xa1, 0x88, 0x44, 0x7b, - 0xc2, 0x31, 0x87, 0x1e, 0x9a, 0x43, 0x0e, 0x4c, 0xc7, 0x32, 0xb0, 0xf0, 0x4a, 0x29, 0x19, 0x89, - 0x77, 0x57, 0x6f, 0xa0, 0x6a, 0xa6, 0x8e, 0x70, 0x05, 0x2d, 0x1c, 0xc0, 0x30, 0x1c, 0x85, 0x44, - 0x7c, 0xe2, 0x73, 0x68, 0x71, 0x40, 0x7b, 0x01, 0x84, 0x53, 0x80, 0x84, 0x8b, 0xeb, 0xf3, 0xd7, - 0xb4, 0xc6, 0xaf, 0xf3, 0xe8, 0xec, 0x8c, 0xee, 0x8a, 0x3f, 0x45, 0x15, 0xc6, 0x3d, 0x9f, 0x76, - 0x21, 0xc9, 0x71, 0x38, 0x5b, 0xcf, 0x89, 0x23, 0xef, 0x4c, 0xed, 
0x91, 0x8c, 0x36, 0x7e, 0x8c, - 0x50, 0x38, 0xc9, 0xda, 0xde, 0x8e, 0x22, 0x36, 0x6f, 0x88, 0x57, 0x7c, 0x2b, 0x96, 0xbe, 0x1e, - 0xd5, 0x2f, 0x67, 0xff, 0x08, 0x26, 0xdd, 0x9e, 0x3f, 0xf4, 0x7a, 0x81, 0x03, 0x89, 0x01, 0x49, - 0x41, 0xe2, 0x6f, 0x11, 0x1a, 0xc8, 0xfd, 0x8e, 0xfd, 0x3d, 0xa8, 0x07, 0xf9, 0x1b, 0x5f, 0xc2, - 0x46, 0xf4, 0x1f, 0xc5, 0xf8, 0x32, 0x10, 0x13, 0x8b, 0x0f, 0xcd, 0x65, 0xe1, 0xd0, 0xc3, 0x18, - 0x85, 0xa4, 0x10, 0x4d, 0xe3, 0xe9, 0xcb, 0xda, 0xdc, 0xb3, 0x97, 0xb5, 0xb9, 0xe7, 0x2f, 0x6b, - 0x73, 0x3f, 0x8c, 0x6b, 0xda, 0xd3, 0x71, 0x4d, 0x7b, 0x36, 0xae, 0x69, 0xcf, 0xc7, 0x35, 0xed, - 0xc5, 0xb8, 0xa6, 0xfd, 0xf4, 0xaa, 0x36, 0xf7, 0x75, 0x21, 0xea, 0x6f, 0x7f, 0x07, 0x00, 0x00, - 0xff, 0xff, 0xd9, 0xdd, 0x9f, 0xc3, 0xcc, 0x0f, 0x00, 0x00, + // 2037 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x59, 0x4f, 0x6f, 0x1b, 0xc7, + 0x15, 0xd7, 0x8a, 0x94, 0x44, 0x0e, 0x29, 0x51, 0x1c, 0x29, 0xcd, 0x5a, 0x88, 0x49, 0x83, 0x41, + 0x0a, 0x17, 0x89, 0x97, 0x75, 0x91, 0xb6, 0xae, 0x7b, 0x70, 0xb9, 0x8a, 0x62, 0xcb, 0x16, 0x65, + 0x75, 0x28, 0x1b, 0x48, 0x1a, 0xd4, 0x19, 0xad, 0x46, 0xd4, 0x4a, 0xfb, 0x87, 0xdd, 0x99, 0x25, + 0xc4, 0x9e, 0x8a, 0xf6, 0x50, 0xa0, 0xa7, 0xa0, 0x28, 0x8a, 0x7e, 0x83, 0x02, 0xbd, 0xb7, 0xdf, + 0xa0, 0xa8, 0x0f, 0x3d, 0x04, 0xbd, 0x34, 0x87, 0x82, 0x88, 0x19, 0xf4, 0x4b, 0xf8, 0x54, 0xcc, + 0xec, 0xec, 0x1f, 0xee, 0x52, 0xb1, 0x64, 0xd2, 0x35, 0x72, 0xdb, 0x79, 0xef, 0xcd, 0xef, 0xbd, + 0x37, 0x7f, 0xde, 0xfb, 0x0d, 0x09, 0xee, 0x77, 0x4d, 0x76, 0xec, 0x1f, 0x68, 0x86, 0x6b, 0x37, + 0xb1, 0xd7, 0x75, 0x7b, 0x9e, 0x7b, 0x22, 0x3e, 0x6e, 0x90, 0x3e, 0x71, 0x18, 0x6d, 0xf6, 0x4e, + 0xbb, 0x4d, 0xdc, 0x33, 0x69, 0x53, 0x8c, 0x0f, 0x7c, 0xda, 0xec, 0xdf, 0xc4, 0x56, 0xef, 0x18, + 0xdf, 0x6c, 0x76, 0x89, 0x43, 0x3c, 0xcc, 0xc8, 0xa1, 0xd6, 0xf3, 0x5c, 0xe6, 0xc2, 0xdb, 0x31, + 0x96, 0x16, 0x62, 0x89, 0x8f, 0x27, 0x01, 0x96, 0xd6, 0x3b, 0xed, 0x6a, 0x1c, 0x4b, 0x0b, 0xb1, + 0xb4, 0x10, 0x6b, 0xe3, 0xce, 0x85, 0xe3, 0x30, 0x5c, 0xdb, 0x76, 0x9d, 0xb4, 0xf3, 0x8d, 0x1b, + 0x09, 0x80, 0xae, 0xdb, 0x75, 0x9b, 0x42, 0x7c, 0xe0, 0x1f, 0x89, 0x91, 0x18, 0x88, 0x2f, 0x69, + 0xde, 0x38, 0xbd, 0x45, 0x35, 0xd3, 0xe5, 0x90, 0x4d, 0xc3, 0xf5, 0x48, 0xb3, 0x9f, 0xc9, 0x67, + 0xe3, 0xfd, 0xd8, 0xc6, 0xc6, 0xc6, 0xb1, 0xe9, 0x10, 0x6f, 0x10, 0xc6, 0xd1, 0xf4, 0x08, 0x75, + 0x7d, 0xcf, 0x20, 0x97, 0x9a, 0x45, 0x9b, 0x36, 0x61, 0x78, 0x92, 0xaf, 0xe6, 0x79, 0xb3, 0x3c, + 0xdf, 0x61, 0xa6, 0x9d, 0x75, 0xf3, 0x83, 0x17, 0x4d, 0xa0, 0xc6, 0x31, 0xb1, 0x71, 0x7a, 0x5e, + 0xe3, 0x5f, 0xf3, 0xa0, 0xa8, 0xfb, 0x74, 0xd3, 0x75, 0x8e, 0xcc, 0x2e, 0x3c, 0x04, 0x79, 0x07, + 0x33, 0xaa, 0x2a, 0xd7, 0x94, 0xeb, 0xa5, 0xef, 0x7d, 0xa8, 0xbd, 0xfc, 0x0e, 0x6a, 0xbb, 0xad, + 0xfd, 0x4e, 0x80, 0xaa, 0x17, 0x46, 0xc3, 0x7a, 0x9e, 0x8f, 0x91, 0x40, 0x87, 0x67, 0xa0, 0x78, + 0x42, 0x18, 0x65, 0x1e, 0xc1, 0xb6, 0x3a, 0x2f, 0x5c, 0x3d, 0x98, 0xc6, 0xd5, 0x7d, 0xc2, 0x3a, + 0x02, 0x4c, 0xfa, 0x5b, 0x1e, 0x0d, 0xeb, 0xc5, 0x48, 0x88, 0x62, 0x67, 0x90, 0x80, 0x85, 0x53, + 0x7c, 0x74, 0x8a, 0xd5, 0x9c, 0xf0, 0xfa, 0xc1, 0x34, 0x5e, 0x1f, 0x70, 0x20, 0xdd, 0xa7, 0x7a, + 0x71, 0x34, 0xac, 0x2f, 0x88, 0x11, 0x0a, 0xd0, 0x1b, 0x7f, 0x9b, 0x07, 0xd5, 0x4d, 0xd7, 0x61, + 0x98, 0x6f, 0xc3, 0x3e, 0xb1, 0x7b, 0x16, 0x66, 0x04, 0x7e, 0x04, 0x8a, 0xe1, 0x29, 0x09, 0x57, + 0xf8, 0xba, 0x16, 0x6c, 0x1b, 0xf7, 0xa1, 0xf1, 0x73, 0xa7, 0xf5, 0x6f, 0x6a, 0x48, 0x1a, 0x21, + 0xf2, 0x0b, 0xdf, 0xf4, 0x88, 0xcd, 0x03, 0xd1, 0xab, 0x4f, 0x87, 0xf5, 0x39, 
0x9e, 0x57, 0xa8, + 0xa5, 0x28, 0x46, 0x83, 0x07, 0xa0, 0x62, 0xda, 0xb8, 0x4b, 0xf6, 0x7c, 0xcb, 0xda, 0x73, 0x2d, + 0xd3, 0x18, 0x88, 0x75, 0x2d, 0xea, 0xb7, 0xe4, 0xb4, 0xca, 0xf6, 0xb8, 0xfa, 0xf9, 0xb0, 0x7e, + 0x35, 0x7b, 0xe4, 0xb5, 0xd8, 0x00, 0xa5, 0x01, 0xb9, 0x0f, 0x4a, 0x0c, 0xdf, 0x33, 0xd9, 0x80, + 0xe7, 0x46, 0xce, 0x98, 0x5c, 0xc5, 0xb7, 0x27, 0x25, 0xd1, 0x19, 0x37, 0xd5, 0xd7, 0x78, 0x10, + 0x29, 0x21, 0x4a, 0x03, 0x36, 0xfe, 0x39, 0x0f, 0x0a, 0x5b, 0x7c, 0xa5, 0x75, 0x9f, 0xc2, 0x4f, + 0x41, 0x81, 0x5f, 0x8f, 0x43, 0xcc, 0xb0, 0x5c, 0xae, 0xef, 0x26, 0x3c, 0x45, 0xa7, 0x3c, 0xde, + 0x23, 0x6e, 0xcd, 0x7d, 0x3f, 0x3c, 0x38, 0x21, 0x06, 0x6b, 0x13, 0x86, 0x75, 0x28, 0xf3, 0x07, + 0xb1, 0x0c, 0x45, 0xa8, 0xf0, 0x04, 0xe4, 0x69, 0x8f, 0x18, 0xf2, 0x0c, 0xde, 0x9b, 0xe6, 0x34, + 0x84, 0x51, 0x77, 0x7a, 0xc4, 0xd0, 0xcb, 0xd2, 0x6b, 0x9e, 0x8f, 0x90, 0xf0, 0x01, 0x3d, 0xb0, + 0x48, 0x19, 0x66, 0x3e, 0x95, 0xab, 0x76, 0x7f, 0x26, 0xde, 0x04, 0xa2, 0xbe, 0x22, 0xfd, 0x2d, + 0x06, 0x63, 0x24, 0x3d, 0x35, 0xfe, 0xad, 0x80, 0x72, 0x68, 0xba, 0x63, 0x52, 0x06, 0x3f, 0xc9, + 0x2c, 0xa9, 0x76, 0xb1, 0x25, 0xe5, 0xb3, 0xc5, 0x82, 0xae, 0x4a, 0x57, 0x85, 0x50, 0x92, 0x58, + 0x4e, 0x13, 0x2c, 0x98, 0x8c, 0xd8, 0x54, 0x9d, 0xbf, 0x96, 0x9b, 0xf6, 0x76, 0x85, 0x61, 0xeb, + 0xcb, 0xd2, 0xe1, 0xc2, 0x36, 0x87, 0x46, 0x81, 0x87, 0xc6, 0x7f, 0x72, 0x71, 0x66, 0x7c, 0x91, + 0x21, 0x1e, 0xab, 0x5c, 0x9b, 0xd3, 0x56, 0x2e, 0xee, 0x39, 0x5d, 0xb6, 0xfc, 0x6c, 0xd9, 0xba, + 0x37, 0x93, 0xb2, 0x25, 0xd2, 0x7c, 0xcd, 0x35, 0x0b, 0xfe, 0x4e, 0x01, 0x95, 0xc8, 0xe9, 0xd6, + 0x99, 0xcb, 0x4c, 0x43, 0xcd, 0xcf, 0xbe, 0x36, 0x8b, 0x3a, 0x10, 0x09, 0x03, 0x3f, 0x28, 0xed, + 0xb8, 0xf1, 0xa5, 0x02, 0x56, 0xc6, 0xcf, 0x38, 0x7c, 0x12, 0xdd, 0x9f, 0x60, 0x8b, 0x7f, 0x78, + 0xf1, 0xa8, 0x02, 0x8a, 0xa0, 0x7d, 0xfd, 0x65, 0x81, 0x36, 0x58, 0x34, 0x44, 0x8c, 0x72, 0x6f, + 0xb7, 0xa6, 0x49, 0x3b, 0x6a, 0xa9, 0xb1, 0xbb, 0x60, 0x8c, 0xa4, 0x93, 0xc6, 0x6f, 0x56, 0x40, + 0x39, 0x79, 0x02, 0xe0, 0x77, 0xc0, 0x52, 0x9f, 0x78, 0xd4, 0x74, 0x1d, 0x91, 0x61, 0x51, 0xaf, + 0xc8, 0x99, 0x4b, 0x8f, 0x03, 0x31, 0x0a, 0xf5, 0xf0, 0x3a, 0x28, 0x78, 0xa4, 0x67, 0x99, 0x06, + 0xa6, 0x22, 0xd8, 0x05, 0xbd, 0xcc, 0xaf, 0x24, 0x92, 0x32, 0x14, 0x69, 0xe1, 0xef, 0x15, 0x50, + 0x35, 0xd2, 0x9d, 0x48, 0x9e, 0xa4, 0xf6, 0x34, 0x09, 0x66, 0xda, 0x9b, 0xfe, 0xc6, 0x68, 0x58, + 0xcf, 0x76, 0x3d, 0x94, 0x75, 0x0f, 0xff, 0xa2, 0x80, 0x2b, 0x1e, 0xb1, 0x5c, 0x7c, 0x48, 0xbc, + 0xcc, 0x04, 0x79, 0xe8, 0x66, 0x1c, 0xdc, 0xd5, 0xd1, 0xb0, 0x7e, 0x05, 0x9d, 0xe7, 0x13, 0x9d, + 0x1f, 0x0e, 0xfc, 0xb3, 0x02, 0x54, 0x9b, 0x30, 0xcf, 0x34, 0x68, 0x36, 0xd6, 0x85, 0x57, 0x11, + 0xeb, 0x5b, 0xa3, 0x61, 0x5d, 0x6d, 0x9f, 0xe3, 0x12, 0x9d, 0x1b, 0x0c, 0xfc, 0xb5, 0x02, 0x4a, + 0x3d, 0x7e, 0x42, 0x28, 0x23, 0x8e, 0x41, 0xd4, 0x45, 0x11, 0xdc, 0xc3, 0x69, 0x82, 0xdb, 0x8b, + 0xe1, 0x3a, 0x8c, 0xd3, 0xc6, 0xee, 0x40, 0xaf, 0x8c, 0x86, 0xf5, 0x52, 0x42, 0x81, 0x92, 0x4e, + 0xa1, 0x91, 0xe8, 0x30, 0x4b, 0x22, 0x80, 0x1f, 0x5d, 0xfa, 0xa2, 0xb6, 0x25, 0x40, 0x70, 0xaa, + 0xc3, 0x51, 0xa2, 0xd1, 0xfc, 0x41, 0x01, 0x65, 0xc7, 0x3d, 0x24, 0x1d, 0x62, 0x11, 0x83, 0xb9, + 0x9e, 0x5a, 0x10, 0x0d, 0xe7, 0xe3, 0x59, 0x55, 0x63, 0x6d, 0x37, 0x01, 0xbe, 0xe5, 0x30, 0x6f, + 0xa0, 0xaf, 0xcb, 0xcb, 0x58, 0x4e, 0xaa, 0xd0, 0x58, 0x14, 0xf0, 0x11, 0x28, 0x31, 0xd7, 0xe2, + 0xf4, 0xda, 0x74, 0x1d, 0xaa, 0x16, 0x45, 0x50, 0xb5, 0x49, 0xec, 0x68, 0x3f, 0x32, 0xd3, 0xd7, + 0x24, 0x70, 0x29, 0x96, 0x51, 0x94, 0xc4, 0x81, 0x24, 0x4b, 0xbc, 0x80, 0x58, 0xd9, 0x6f, 0x4f, + 0x82, 
0xde, 0x73, 0x0f, 0x5f, 0x8a, 0x7b, 0x41, 0x07, 0xac, 0x46, 0x94, 0xaf, 0x43, 0x0c, 0x8f, + 0x30, 0xaa, 0x96, 0x44, 0x0a, 0x13, 0x59, 0xea, 0x8e, 0x6b, 0x60, 0x2b, 0x60, 0x55, 0x88, 0x1c, + 0x11, 0x8f, 0xef, 0xbe, 0xae, 0xca, 0x64, 0x56, 0xb7, 0x53, 0x48, 0x28, 0x83, 0x0d, 0xef, 0x82, + 0x6a, 0xcf, 0x33, 0x5d, 0x11, 0x82, 0x85, 0x29, 0xdd, 0xc5, 0x36, 0x51, 0xcb, 0xa2, 0xf2, 0x5d, + 0x91, 0x30, 0xd5, 0xbd, 0xb4, 0x01, 0xca, 0xce, 0xe1, 0xd5, 0x30, 0x14, 0xaa, 0xcb, 0x71, 0x35, + 0x0c, 0xe7, 0xa2, 0x48, 0x0b, 0x3f, 0x04, 0x05, 0x7c, 0x74, 0x64, 0x3a, 0xdc, 0x72, 0x45, 0x2c, + 0xe1, 0x5b, 0x93, 0x52, 0x6b, 0x49, 0x9b, 0x00, 0x27, 0x1c, 0xa1, 0x68, 0x2e, 0xbc, 0x0f, 0x20, + 0x25, 0x5e, 0xdf, 0x34, 0x48, 0xcb, 0x30, 0x5c, 0xdf, 0x61, 0x22, 0xf6, 0x8a, 0x88, 0x7d, 0x43, + 0xc6, 0x0e, 0x3b, 0x19, 0x0b, 0x34, 0x61, 0x16, 0x8f, 0x9e, 0x12, 0xc6, 0x4c, 0xa7, 0x4b, 0xd5, + 0x55, 0x81, 0x20, 0xbc, 0x76, 0xa4, 0x0c, 0x45, 0x5a, 0xf8, 0x2e, 0x28, 0x52, 0x86, 0x3d, 0xd6, + 0xf2, 0xba, 0x54, 0xad, 0x5e, 0xcb, 0x5d, 0x2f, 0x06, 0xac, 0xa1, 0x13, 0x0a, 0x51, 0xac, 0x87, + 0xef, 0x83, 0x32, 0x4d, 0xf4, 0x5d, 0x15, 0x0a, 0xe8, 0x55, 0x7e, 0x82, 0x93, 0xfd, 0x18, 0x8d, + 0x59, 0x41, 0x0d, 0x00, 0x1b, 0x9f, 0xed, 0xe1, 0x01, 0xaf, 0x86, 0xea, 0x9a, 0x98, 0xb3, 0xc2, + 0xe9, 0x73, 0x3b, 0x92, 0xa2, 0x84, 0xc5, 0xc6, 0x1d, 0x50, 0xcd, 0x5c, 0x15, 0xb8, 0x0a, 0x72, + 0xa7, 0x64, 0x10, 0x34, 0x31, 0xc4, 0x3f, 0xe1, 0x3a, 0x58, 0xe8, 0x63, 0xcb, 0x27, 0xc1, 0xa3, + 0x04, 0x05, 0x83, 0xdb, 0xf3, 0xb7, 0x94, 0xc6, 0x3f, 0x14, 0x50, 0x49, 0x51, 0x04, 0x78, 0x15, + 0xe4, 0x7c, 0xcf, 0x92, 0x4d, 0xb0, 0x24, 0x97, 0x33, 0xf7, 0x08, 0xed, 0x20, 0x2e, 0x87, 0x3f, + 0x03, 0x65, 0x6c, 0x18, 0x84, 0xd2, 0xe0, 0x20, 0xc9, 0x6e, 0xfd, 0xce, 0x39, 0x8f, 0x10, 0x8f, + 0xb0, 0x07, 0x64, 0x10, 0x06, 0x18, 0x2c, 0x40, 0x2b, 0x31, 0x1d, 0x8d, 0x81, 0xc1, 0x5b, 0xa9, + 0x65, 0xcb, 0x89, 0x20, 0xa2, 0xcb, 0x7f, 0xfe, 0xd2, 0x35, 0xfe, 0x9a, 0x03, 0x85, 0x90, 0x5e, + 0xbd, 0x28, 0x85, 0xb7, 0xc1, 0x02, 0x73, 0x7b, 0xa6, 0x21, 0x1f, 0x69, 0x11, 0xc5, 0xdd, 0xe7, + 0x42, 0x14, 0xe8, 0x92, 0x7c, 0x20, 0xf7, 0x02, 0x3e, 0xf0, 0x08, 0xe4, 0x98, 0x45, 0x65, 0xe7, + 0xbc, 0x7d, 0xe9, 0x7a, 0xbb, 0xbf, 0x13, 0xbe, 0xd4, 0x97, 0x78, 0x98, 0xfb, 0x3b, 0x1d, 0xc4, + 0xf1, 0xe0, 0x47, 0x20, 0x4f, 0x31, 0xb5, 0x64, 0x97, 0xfb, 0xf1, 0xe5, 0x09, 0x57, 0xab, 0xb3, + 0x93, 0xfc, 0x09, 0x80, 0x8f, 0x91, 0x80, 0x84, 0xbf, 0x55, 0xc0, 0xb2, 0xe1, 0x3a, 0xd4, 0xb7, + 0x89, 0x77, 0xd7, 0x73, 0xfd, 0x9e, 0xec, 0x56, 0xbb, 0x53, 0xb3, 0xdb, 0xcd, 0x24, 0xaa, 0x5e, + 0x1d, 0x0d, 0xeb, 0xcb, 0x63, 0x22, 0x34, 0xee, 0xb7, 0xf1, 0x77, 0x05, 0xc0, 0xec, 0x44, 0xd8, + 0x04, 0xc5, 0x2e, 0xff, 0x10, 0x37, 0x3b, 0xd8, 0xc7, 0xe8, 0x09, 0x7e, 0x37, 0x54, 0xa0, 0xd8, + 0x86, 0x97, 0x33, 0x8f, 0x1c, 0x60, 0x0b, 0x27, 0x7a, 0xa5, 0xdc, 0xdf, 0xa8, 0x9c, 0xa1, 0xb4, + 0x01, 0xca, 0xce, 0x81, 0xdf, 0x07, 0x25, 0x71, 0x8d, 0x1f, 0x5a, 0x87, 0x84, 0x06, 0x6f, 0xec, + 0x42, 0xdc, 0x25, 0x3a, 0xb1, 0x0a, 0x25, 0xed, 0x1a, 0xff, 0x55, 0xc0, 0x92, 0x7c, 0xb9, 0x40, + 0x07, 0x2c, 0x3a, 0x98, 0x99, 0x7d, 0x22, 0xb9, 0xf2, 0x54, 0x6f, 0xcd, 0x5d, 0x81, 0x14, 0xb5, + 0x7f, 0xc0, 0xb9, 0x6c, 0x20, 0x43, 0xd2, 0x0b, 0x3c, 0x01, 0x8b, 0x24, 0x78, 0x31, 0xcc, 0xcf, + 0xf4, 0x87, 0x23, 0xe1, 0x4b, 0xbe, 0x11, 0xa4, 0x87, 0xc6, 0x57, 0x0a, 0x00, 0xb1, 0xc9, 0x8b, + 0x6e, 0xda, 0xbb, 0xa0, 0x68, 0x58, 0x3e, 0x65, 0xc4, 0xdb, 0xfe, 0x20, 0xbc, 0x6d, 0x7c, 0x0b, + 0x37, 0x43, 0x21, 0x8a, 0xf5, 0xf0, 0x3d, 0x90, 0xc7, 0x3e, 0x3b, 0x96, 0xd7, 0x4d, 0xe5, 0x47, + 0xb6, 0xe5, 0xb3, 0xe3, 0xe7, 
0xbc, 0x64, 0xf8, 0xec, 0x38, 0xda, 0x34, 0x61, 0x95, 0xa9, 0x43, + 0xf9, 0x19, 0xd6, 0xa1, 0xc6, 0x67, 0x15, 0xb0, 0x32, 0xbe, 0xf0, 0xf0, 0xbd, 0x04, 0xe9, 0x57, + 0x44, 0x9b, 0x8b, 0xde, 0xe2, 0x13, 0x88, 0x7f, 0x98, 0xcb, 0xfc, 0x85, 0x72, 0x49, 0x53, 0xc7, + 0xdc, 0xeb, 0xa0, 0x8e, 0x93, 0xdf, 0x2a, 0xf9, 0xd7, 0xfb, 0x56, 0xf9, 0xe6, 0xd0, 0xff, 0x3f, + 0xa6, 0x49, 0xf1, 0xa2, 0x20, 0x6f, 0x9f, 0xcc, 0xee, 0xee, 0xcf, 0x86, 0x16, 0x2f, 0xcd, 0x88, + 0x16, 0x27, 0x5f, 0x1a, 0x85, 0x57, 0xf5, 0xd2, 0x98, 0xc0, 0xbd, 0x8b, 0xaf, 0x80, 0x7b, 0x37, + 0xc0, 0xa2, 0x8d, 0xcf, 0x5a, 0x5d, 0x22, 0x98, 0x7d, 0x31, 0x28, 0x7c, 0x6d, 0x21, 0x41, 0x52, + 0xf3, 0x7f, 0xe7, 0xe7, 0x93, 0x49, 0x6e, 0xf9, 0xa5, 0x48, 0xee, 0x44, 0xae, 0xbf, 0x3c, 0x25, + 0xd7, 0x5f, 0xb9, 0x30, 0xd7, 0xaf, 0x4c, 0xc1, 0xf5, 0xdf, 0x01, 0x4b, 0x36, 0x3e, 0x6b, 0x53, + 0x49, 0xcf, 0xf3, 0x7a, 0x89, 0x53, 0xb0, 0x76, 0x20, 0x42, 0xa1, 0x8e, 0x07, 0x66, 0xe3, 0x33, + 0x7d, 0xc0, 0x08, 0xe7, 0xe6, 0x11, 0x8d, 0x6f, 0x4b, 0x19, 0x8a, 0xb4, 0x12, 0xb0, 0xe3, 0x1f, + 0x50, 0x41, 0xca, 0x63, 0x40, 0x2e, 0x42, 0xa1, 0xee, 0xb2, 0x54, 0x1c, 0xee, 0x80, 0x75, 0x0f, + 0x1f, 0xb1, 0x7b, 0x04, 0x7b, 0xec, 0x80, 0x60, 0xb6, 0x6f, 0xda, 0xc4, 0xf5, 0x99, 0xba, 0x1e, + 0x35, 0x80, 0x75, 0x34, 0x41, 0x8f, 0x26, 0xce, 0x82, 0xdb, 0x60, 0x8d, 0xcb, 0xb7, 0xf8, 0x15, + 0x36, 0x5d, 0x27, 0x04, 0x7b, 0x43, 0x80, 0xbd, 0x39, 0x1a, 0xd6, 0xd7, 0x50, 0x56, 0x8d, 0x26, + 0xcd, 0x81, 0x3f, 0x01, 0xab, 0x5c, 0xbc, 0x43, 0x30, 0x25, 0x21, 0xce, 0xb7, 0x02, 0x5a, 0xcd, + 0x4f, 0x22, 0x4a, 0xe9, 0x50, 0xc6, 0x1a, 0x6e, 0x82, 0x2a, 0x97, 0x6d, 0xba, 0xb6, 0x6d, 0x46, + 0x79, 0xbd, 0x29, 0x20, 0x44, 0x21, 0x47, 0x69, 0x25, 0xca, 0xda, 0x4f, 0xff, 0x54, 0xf9, 0xd3, + 0x3c, 0x58, 0x9b, 0xd0, 0xd4, 0x78, 0x7e, 0x94, 0xb9, 0x1e, 0xee, 0x92, 0xf8, 0x68, 0x2b, 0x71, + 0x7e, 0x9d, 0x94, 0x0e, 0x65, 0xac, 0xe1, 0x13, 0x00, 0x82, 0xe6, 0xdf, 0x76, 0x0f, 0xa5, 0x63, + 0xfd, 0x0e, 0xdf, 0xea, 0x56, 0x24, 0x7d, 0x3e, 0xac, 0xdf, 0x98, 0xf4, 0x7f, 0x4d, 0x18, 0x0f, + 0x7b, 0xec, 0x5a, 0xbe, 0x4d, 0xe2, 0x09, 0x28, 0x01, 0x09, 0x7f, 0x0e, 0x40, 0x5f, 0xe8, 0x3b, + 0xe6, 0x2f, 0xc3, 0xe6, 0xfe, 0xb5, 0x3f, 0xfc, 0x6b, 0xe1, 0x5f, 0x4b, 0xda, 0x4f, 0x7d, 0xec, + 0x30, 0x7e, 0x3f, 0xc4, 0xd9, 0x7b, 0x1c, 0xa1, 0xa0, 0x04, 0xa2, 0xfe, 0xe9, 0xd3, 0x67, 0xb5, + 0xb9, 0xcf, 0x9f, 0xd5, 0xe6, 0xbe, 0x78, 0x56, 0x9b, 0xfb, 0xd5, 0xa8, 0xa6, 0x3c, 0x1d, 0xd5, + 0x94, 0xcf, 0x47, 0x35, 0xe5, 0x8b, 0x51, 0x4d, 0xf9, 0x72, 0x54, 0x53, 0x3e, 0xfb, 0xaa, 0x36, + 0xf7, 0xf1, 0xed, 0x97, 0xff, 0x6f, 0xf9, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x3f, 0x06, 0x32, + 0xa6, 0x98, 0x1e, 0x00, 0x00, } func (m *BusConfig) Marshal() (dAtA []byte, err error) { @@ -457,6 +615,30 @@ func (m *BusConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Kafka != nil { + { + size, err := m.Kafka.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.JetStream != nil { + { + size, err := m.JetStream.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } if m.NATS != nil { { size, err := m.NATS.MarshalToSizedBuffer(dAtA[:i]) @@ -492,6 +674,23 @@ func (m *ContainerTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.SecurityContext != nil { + { + size, err := m.SecurityContext.MarshalToSizedBuffer(dAtA[:i]) + if err != 
nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i -= len(m.ImagePullPolicy) + copy(dAtA[i:], m.ImagePullPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImagePullPolicy))) + i-- + dAtA[i] = 0x12 { size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -625,6 +824,42 @@ func (m *EventBusSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.JetStreamExotic != nil { + { + size, err := m.JetStreamExotic.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Kafka != nil { + { + size, err := m.Kafka.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.JetStream != nil { + { + size, err := m.JetStream.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } if m.NATS != nil { { size, err := m.NATS.MarshalToSizedBuffer(dAtA[:i]) @@ -683,54 +918,7 @@ func (m *EventBusStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *NATSBus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NATSBus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NATSBus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Exotic != nil { - { - size, err := m.Exotic.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Native != nil { - { - size, err := m.Native.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *NATSConfig) Marshal() (dAtA []byte, err error) { +func (m *JetStreamBus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -740,86 +928,59 @@ func (m *NATSConfig) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *NATSConfig) MarshalTo(dAtA []byte) (int, error) { +func (m *JetStreamBus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *NATSConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *JetStreamBus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.AccessSecret != nil { - { - size, err := m.AccessSecret.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.Auth != nil { - i -= len(*m.Auth) - copy(dAtA[i:], *m.Auth) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Auth))) + if m.MaxPayload != nil { + i -= len(*m.MaxPayload) + copy(dAtA[i:], *m.MaxPayload) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MaxPayload))) i-- - dAtA[i] = 0x1a - } - if m.ClusterID != nil { - i -= len(*m.ClusterID) - copy(dAtA[i:], *m.ClusterID) - i = 
encodeVarintGenerated(dAtA, i, uint64(len(*m.ClusterID))) + dAtA[i] = 0x1 i-- - dAtA[i] = 0x12 - } - i -= len(m.URL) - copy(dAtA[i:], m.URL) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *NativeStrategy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + dAtA[i] = 0x9a } - return dAtA[:n], nil -} - -func (m *NativeStrategy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NativeStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.MaxBytes != nil { - i -= len(*m.MaxBytes) - copy(dAtA[i:], *m.MaxBytes) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MaxBytes))) + if m.StreamConfig != nil { + i -= len(*m.StreamConfig) + copy(dAtA[i:], *m.StreamConfig) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StreamConfig))) i-- dAtA[i] = 0x1 i-- dAtA[i] = 0x92 } - if m.MaxMsgs != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxMsgs)) + if len(m.StartArgs) > 0 { + for iNdEx := len(m.StartArgs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.StartArgs[iNdEx]) + copy(dAtA[i:], m.StartArgs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StartArgs[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + } + if m.Settings != nil { + i -= len(*m.Settings) + copy(dAtA[i:], *m.Settings) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Settings))) i-- dAtA[i] = 0x1 i-- - dAtA[i] = 0x88 + dAtA[i] = 0x82 } + i -= len(m.ServiceAccountName) + copy(dAtA[i:], m.ServiceAccountName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName))) + i-- + dAtA[i] = 0x7a if m.Affinity != nil { { size, err := m.Affinity.MarshalToSizedBuffer(dAtA[:i]) @@ -830,25 +991,18 @@ func (m *NativeStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x82 + dAtA[i] = 0x72 } if m.Priority != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.Priority)) i-- - dAtA[i] = 0x78 + dAtA[i] = 0x68 } i -= len(m.PriorityClassName) copy(dAtA[i:], m.PriorityClassName) i = encodeVarintGenerated(dAtA, i, uint64(len(m.PriorityClassName))) i-- - dAtA[i] = 0x72 - i -= len(m.ServiceAccountName) - copy(dAtA[i:], m.ServiceAccountName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName))) - i-- - dAtA[i] = 0x6a + dAtA[i] = 0x62 if len(m.ImagePullSecrets) > 0 { for iNdEx := len(m.ImagePullSecrets) - 1; iNdEx >= 0; iNdEx-- { { @@ -860,16 +1014,9 @@ func (m *NativeStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x62 + dAtA[i] = 0x5a } } - if m.MaxAge != nil { - i -= len(*m.MaxAge) - copy(dAtA[i:], *m.MaxAge) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MaxAge))) - i-- - dAtA[i] = 0x5a - } if m.SecurityContext != nil { { size, err := m.SecurityContext.MarshalToSizedBuffer(dAtA[:i]) @@ -882,18 +1029,6 @@ func (m *NativeStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x52 } - if m.Metadata != nil { - { - size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } if len(m.Tolerations) > 0 { for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { { @@ -905,7 +1040,7 @@ func 
(m *NativeStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x42 + dAtA[i] = 0x4a } } if len(m.NodeSelector) > 0 { @@ -929,12 +1064,24 @@ func (m *NativeStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x3a + dAtA[i] = 0x42 } } - if m.MetricsContainerTemplate != nil { + if m.Metadata != nil { { - size, err := m.MetricsContainerTemplate.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.Persistence != nil { + { + size, err := m.Persistence.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -944,9 +1091,9 @@ func (m *NativeStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x32 } - if m.ContainerTemplate != nil { + if m.MetricsContainerTemplate != nil { { - size, err := m.ContainerTemplate.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.MetricsContainerTemplate.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -956,9 +1103,9 @@ func (m *NativeStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x2a } - if m.Persistence != nil { + if m.ReloaderContainerTemplate != nil { { - size, err := m.Persistence.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ReloaderContainerTemplate.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -968,28 +1115,77 @@ func (m *NativeStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x22 } + if m.ContainerTemplate != nil { + { + size, err := m.ContainerTemplate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x10 + } + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) i-- - if m.DeprecatedAntiAffinity { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *JetStreamConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *JetStreamConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JetStreamConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.StreamConfig) + copy(dAtA[i:], m.StreamConfig) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StreamConfig))) i-- - dAtA[i] = 0x18 - if m.Auth != nil { - i -= len(*m.Auth) - copy(dAtA[i:], *m.Auth) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Auth))) + dAtA[i] = 0x1a + if m.AccessSecret != nil { + { + size, err := m.AccessSecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0x12 } - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i -= len(m.URL) + copy(dAtA[i:], m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *PersistenceStrategy) Marshal() (dAtA []byte, err error) { +func (m 
*KafkaBus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -999,19 +1195,19 @@ func (m *PersistenceStrategy) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PersistenceStrategy) MarshalTo(dAtA []byte) (int, error) { +func (m *KafkaBus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PersistenceStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *KafkaBus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.VolumeSize != nil { + if m.ConsumerGroup != nil { { - size, err := m.VolumeSize.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ConsumerGroup.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1019,456 +1215,2777 @@ func (m *PersistenceStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a + dAtA[i] = 0x32 } - if m.AccessMode != nil { - i -= len(*m.AccessMode) - copy(dAtA[i:], *m.AccessMode) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.AccessMode))) + if m.SASL != nil { + { + size, err := m.SASL.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x12 + dAtA[i] = 0x2a } - if m.StorageClassName != nil { - i -= len(*m.StorageClassName) - copy(dAtA[i:], *m.StorageClassName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StorageClassName))) + if m.TLS != nil { + { + size, err := m.TLS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0xa + dAtA[i] = 0x22 } + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x1a + i -= len(m.Topic) + copy(dAtA[i:], m.Topic) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Topic))) + i-- + dAtA[i] = 0x12 + i -= len(m.URL) + copy(dAtA[i:], m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *KafkaConsumerGroup) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return base + return dAtA[:n], nil } -func (m *BusConfig) Size() (n int) { - if m == nil { - return 0 - } + +func (m *KafkaConsumerGroup) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KafkaConsumerGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.NATS != nil { - l = m.NATS.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + if m.StartOldest { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return n + i-- + dAtA[i] = 0x18 + i -= len(m.RebalanceStrategy) + copy(dAtA[i:], m.RebalanceStrategy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RebalanceStrategy))) + i-- + dAtA[i] = 0x12 + i -= len(m.GroupName) + copy(dAtA[i:], m.GroupName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *ContainerTemplate) Size() (n int) 
{ - if m == nil { - return 0 +func (m *NATSBus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - var l int - _ = l - l = m.Resources.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + return dAtA[:n], nil } -func (m *EventBus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *NATSBus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *EventBusList) Size() (n int) { - if m == nil { - return 0 - } +func (m *NATSBus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Exotic != nil { + { + size, err := m.Exotic.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - return n + if m.Native != nil { + { + size, err := m.Native.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil } -func (m *EventBusSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.NATS != nil { - l = m.NATS.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *NATSConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *EventBusStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Config.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *NATSConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *NATSBus) Size() (n int) { - if m == nil { - return 0 - } +func (m *NATSConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Native != nil { - l = m.Native.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.AccessSecret != nil { + { + size, err := m.AccessSecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } - if m.Exotic != nil { - l = m.Exotic.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Auth != nil { + i -= len(*m.Auth) + copy(dAtA[i:], *m.Auth) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Auth))) + i-- + dAtA[i] = 0x1a } - return n + if m.ClusterID != nil { + i -= len(*m.ClusterID) + copy(dAtA[i:], *m.ClusterID) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ClusterID))) + i-- + dAtA[i] = 0x12 + } + i -= len(m.URL) + copy(dAtA[i:], m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NATSConfig) Size() (n int) { - if m == nil { - return 0 +func (m *NativeStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, 
err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *NativeStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NativeStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.URL) - n += 1 + l + sovGenerated(uint64(l)) - if m.ClusterID != nil { - l = len(*m.ClusterID) - n += 1 + l + sovGenerated(uint64(l)) + if m.RaftCommitTimeout != nil { + i -= len(*m.RaftCommitTimeout) + copy(dAtA[i:], *m.RaftCommitTimeout) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RaftCommitTimeout))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba } - if m.Auth != nil { - l = len(*m.Auth) - n += 1 + l + sovGenerated(uint64(l)) + if m.RaftLeaseTimeout != nil { + i -= len(*m.RaftLeaseTimeout) + copy(dAtA[i:], *m.RaftLeaseTimeout) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RaftLeaseTimeout))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 } - if m.AccessSecret != nil { - l = m.AccessSecret.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.RaftElectionTimeout != nil { + i -= len(*m.RaftElectionTimeout) + copy(dAtA[i:], *m.RaftElectionTimeout) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RaftElectionTimeout))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa } - return n -} - -func (m *NativeStrategy) Size() (n int) { - if m == nil { - return 0 + if m.RaftHeartbeatTimeout != nil { + i -= len(*m.RaftHeartbeatTimeout) + copy(dAtA[i:], *m.RaftHeartbeatTimeout) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RaftHeartbeatTimeout))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - if m.Auth != nil { - l = len(*m.Auth) - n += 1 + l + sovGenerated(uint64(l)) + if m.MaxPayload != nil { + i -= len(*m.MaxPayload) + copy(dAtA[i:], *m.MaxPayload) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MaxPayload))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a } - n += 2 - if m.Persistence != nil { - l = m.Persistence.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.MaxSubs != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxSubs)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x90 } - if m.ContainerTemplate != nil { - l = m.ContainerTemplate.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.MaxBytes != nil { + i -= len(*m.MaxBytes) + copy(dAtA[i:], *m.MaxBytes) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MaxBytes))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a } - if m.MetricsContainerTemplate != nil { - l = m.MetricsContainerTemplate.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.MaxMsgs != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxMsgs)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 } - if len(m.NodeSelector) > 0 { - for k, v := range m.NodeSelector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if m.Affinity != nil { + { + size, err := m.Affinity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x7a } - if len(m.Tolerations) > 0 { - for _, e := range m.Tolerations { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Priority != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Priority)) + i-- + dAtA[i] = 0x70 + } + i -= len(m.PriorityClassName) + copy(dAtA[i:], 
m.PriorityClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PriorityClassName))) + i-- + dAtA[i] = 0x6a + i -= len(m.ServiceAccountName) + copy(dAtA[i:], m.ServiceAccountName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName))) + i-- + dAtA[i] = 0x62 + if len(m.ImagePullSecrets) > 0 { + for iNdEx := len(m.ImagePullSecrets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ImagePullSecrets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a } } - if m.Metadata != nil { - l = m.Metadata.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.MaxAge != nil { + i -= len(*m.MaxAge) + copy(dAtA[i:], *m.MaxAge) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MaxAge))) + i-- + dAtA[i] = 0x52 } if m.SecurityContext != nil { - l = m.SecurityContext.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.SecurityContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a } - if m.MaxAge != nil { - l = len(*m.MaxAge) - n += 1 + l + sovGenerated(uint64(l)) + if m.Metadata != nil { + { + size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 } - if len(m.ImagePullSecrets) > 0 { - for _, e := range m.ImagePullSecrets { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Tolerations) > 0 { + for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a } } - l = len(m.ServiceAccountName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.PriorityClassName) - n += 1 + l + sovGenerated(uint64(l)) - if m.Priority != nil { - n += 1 + sovGenerated(uint64(*m.Priority)) + if len(m.NodeSelector) > 0 { + keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) + for k := range m.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + for iNdEx := len(keysForNodeSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.NodeSelector[string(keysForNodeSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForNodeSelector[iNdEx]) + copy(dAtA[i:], keysForNodeSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNodeSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } } - if m.Affinity != nil { - l = m.Affinity.Size() - n += 2 + l + sovGenerated(uint64(l)) + if m.MetricsContainerTemplate != nil { + { + size, err := m.MetricsContainerTemplate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } - if m.MaxMsgs != nil { - n += 2 + sovGenerated(uint64(*m.MaxMsgs)) + if m.ContainerTemplate != nil { + { + size, err := m.ContainerTemplate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } - if m.MaxBytes != nil { - l = len(*m.MaxBytes) - n += 2 + l + 
sovGenerated(uint64(l)) + if m.Persistence != nil { + { + size, err := m.Persistence.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - return n + if m.Auth != nil { + i -= len(*m.Auth) + copy(dAtA[i:], *m.Auth) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Auth))) + i-- + dAtA[i] = 0x12 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil } -func (m *PersistenceStrategy) Size() (n int) { +func (m *PersistenceStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PersistenceStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistenceStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.VolumeSize != nil { + { + size, err := m.VolumeSize.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.AccessMode != nil { + i -= len(*m.AccessMode) + copy(dAtA[i:], *m.AccessMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.AccessMode))) + i-- + dAtA[i] = 0x12 + } + if m.StorageClassName != nil { + i -= len(*m.StorageClassName) + copy(dAtA[i:], *m.StorageClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StorageClassName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *BusConfig) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.StorageClassName != nil { - l = len(*m.StorageClassName) + if m.NATS != nil { + l = m.NATS.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.AccessMode != nil { - l = len(*m.AccessMode) + if m.JetStream != nil { + l = m.JetStream.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.VolumeSize != nil { - l = m.VolumeSize.Size() + if m.Kafka != nil { + l = m.Kafka.Size() n += 1 + l + sovGenerated(uint64(l)) } return n } -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *BusConfig) String() string { - if this == nil { - return "nil" +func (m *ContainerTemplate) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&BusConfig{`, - `NATS:` + strings.Replace(this.NATS.String(), "NATSConfig", "NATSConfig", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ContainerTemplate) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ImagePullPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecurityContext != nil { + l = m.SecurityContext.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&ContainerTemplate{`, - `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "ResourceRequirements", "v1.ResourceRequirements", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + return n } -func (this 
*EventBus) String() string { - if this == nil { - return "nil" + +func (m *EventBus) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&EventBus{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "EventBusSpec", "EventBusSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "EventBusStatus", "EventBusStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *EventBusList) String() string { - if this == nil { - return "nil" + +func (m *EventBusList) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForItems := "[]EventBus{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "EventBus", "EventBus", 1), `&`, ``, 1) + "," + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForItems += "}" - s := strings.Join([]string{`&EventBusList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s + return n } -func (this *EventBusSpec) String() string { - if this == nil { - return "nil" + +func (m *EventBusSpec) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&EventBusSpec{`, - `NATS:` + strings.Replace(this.NATS.String(), "NATSBus", "NATSBus", 1) + `,`, - `}`, - }, "") - return s -} -func (this *EventBusStatus) String() string { - if this == nil { - return "nil" + var l int + _ = l + if m.NATS != nil { + l = m.NATS.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&EventBusStatus{`, - `Status:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "common.Status", 1), `&`, ``, 1) + `,`, - `Config:` + strings.Replace(strings.Replace(this.Config.String(), "BusConfig", "BusConfig", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s + if m.JetStream != nil { + l = m.JetStream.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Kafka != nil { + l = m.Kafka.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.JetStreamExotic != nil { + l = m.JetStreamExotic.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (this *NATSBus) String() string { - if this == nil { - return "nil" + +func (m *EventBusStatus) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&NATSBus{`, - `Native:` + strings.Replace(this.Native.String(), "NativeStrategy", "NativeStrategy", 1) + `,`, - `Exotic:` + strings.Replace(this.Exotic.String(), "NATSConfig", "NATSConfig", 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Config.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *NATSConfig) String() string { - if this == nil { - return "nil" + +func (m *JetStreamBus) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&NATSConfig{`, - `URL:` + fmt.Sprintf("%v", this.URL) + `,`, - 
`ClusterID:` + valueToStringGenerated(this.ClusterID) + `,`, - `Auth:` + valueToStringGenerated(this.Auth) + `,`, - `AccessSecret:` + strings.Replace(fmt.Sprintf("%v", this.AccessSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.ContainerTemplate != nil { + l = m.ContainerTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ReloaderContainerTemplate != nil { + l = m.ReloaderContainerTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MetricsContainerTemplate != nil { + l = m.MetricsContainerTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Persistence != nil { + l = m.Persistence.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Metadata != nil { + l = m.Metadata.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.NodeSelector) > 0 { + for k, v := range m.NodeSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.SecurityContext != nil { + l = m.SecurityContext.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ImagePullSecrets) > 0 { + for _, e := range m.ImagePullSecrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.PriorityClassName) + n += 1 + l + sovGenerated(uint64(l)) + if m.Priority != nil { + n += 1 + sovGenerated(uint64(*m.Priority)) + } + if m.Affinity != nil { + l = m.Affinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.ServiceAccountName) + n += 1 + l + sovGenerated(uint64(l)) + if m.Settings != nil { + l = len(*m.Settings) + n += 2 + l + sovGenerated(uint64(l)) + } + if len(m.StartArgs) > 0 { + for _, s := range m.StartArgs { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.StreamConfig != nil { + l = len(*m.StreamConfig) + n += 2 + l + sovGenerated(uint64(l)) + } + if m.MaxPayload != nil { + l = len(*m.MaxPayload) + n += 2 + l + sovGenerated(uint64(l)) + } + return n } -func (this *NativeStrategy) String() string { - if this == nil { - return "nil" + +func (m *JetStreamConfig) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForTolerations := "[]Toleration{" - for _, f := range this.Tolerations { - repeatedStringForTolerations += fmt.Sprintf("%v", f) + "," + var l int + _ = l + l = len(m.URL) + n += 1 + l + sovGenerated(uint64(l)) + if m.AccessSecret != nil { + l = m.AccessSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForTolerations += "}" - repeatedStringForImagePullSecrets := "[]LocalObjectReference{" - for _, f := range this.ImagePullSecrets { - repeatedStringForImagePullSecrets += fmt.Sprintf("%v", f) + "," + l = len(m.StreamConfig) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *KafkaBus) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForImagePullSecrets += "}" - keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) - for k := range this.NodeSelector { - keysForNodeSelector = append(keysForNodeSelector, k) + var l int + _ = l + l = len(m.URL) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Topic) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + 
sovGenerated(uint64(l)) + if m.TLS != nil { + l = m.TLS.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) - mapStringForNodeSelector := "map[string]string{" - for _, k := range keysForNodeSelector { - mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + if m.SASL != nil { + l = m.SASL.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForNodeSelector += "}" - s := strings.Join([]string{`&NativeStrategy{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `Auth:` + valueToStringGenerated(this.Auth) + `,`, - `DeprecatedAntiAffinity:` + fmt.Sprintf("%v", this.DeprecatedAntiAffinity) + `,`, - `Persistence:` + strings.Replace(this.Persistence.String(), "PersistenceStrategy", "PersistenceStrategy", 1) + `,`, - `ContainerTemplate:` + strings.Replace(this.ContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, - `MetricsContainerTemplate:` + strings.Replace(this.MetricsContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, - `NodeSelector:` + mapStringForNodeSelector + `,`, - `Tolerations:` + repeatedStringForTolerations + `,`, - `Metadata:` + strings.Replace(fmt.Sprintf("%v", this.Metadata), "Metadata", "common.Metadata", 1) + `,`, - `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "PodSecurityContext", "v1.PodSecurityContext", 1) + `,`, - `MaxAge:` + valueToStringGenerated(this.MaxAge) + `,`, - `ImagePullSecrets:` + repeatedStringForImagePullSecrets + `,`, - `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`, - `PriorityClassName:` + fmt.Sprintf("%v", this.PriorityClassName) + `,`, - `Priority:` + valueToStringGenerated(this.Priority) + `,`, - `Affinity:` + strings.Replace(fmt.Sprintf("%v", this.Affinity), "Affinity", "v1.Affinity", 1) + `,`, - `MaxMsgs:` + valueToStringGenerated(this.MaxMsgs) + `,`, - `MaxBytes:` + valueToStringGenerated(this.MaxBytes) + `,`, - `}`, - }, "") - return s + if m.ConsumerGroup != nil { + l = m.ConsumerGroup.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (this *PersistenceStrategy) String() string { - if this == nil { - return "nil" + +func (m *KafkaConsumerGroup) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&PersistenceStrategy{`, - `StorageClassName:` + valueToStringGenerated(this.StorageClassName) + `,`, - `AccessMode:` + valueToStringGenerated(this.AccessMode) + `,`, - `VolumeSize:` + strings.Replace(fmt.Sprintf("%v", this.VolumeSize), "Quantity", "resource.Quantity", 1) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = len(m.GroupName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RebalanceStrategy) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n } -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + +func (m *NATSBus) Size() (n int) { + if m == nil { + return 0 } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + var l int + _ = l + if m.Native != nil { + l = m.Native.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Exotic != nil { + l = m.Exotic.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (m *BusConfig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b 
:= dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BusConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BusConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + +func (m *NATSConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.URL) + n += 1 + l + sovGenerated(uint64(l)) + if m.ClusterID != nil { + l = len(*m.ClusterID) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Auth != nil { + l = len(*m.Auth) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AccessSecret != nil { + l = m.AccessSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *NativeStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + if m.Auth != nil { + l = len(*m.Auth) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Persistence != nil { + l = m.Persistence.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ContainerTemplate != nil { + l = m.ContainerTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MetricsContainerTemplate != nil { + l = m.MetricsContainerTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.NodeSelector) > 0 { + for k, v := range m.NodeSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Metadata != nil { + l = m.Metadata.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SecurityContext != nil { + l = m.SecurityContext.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MaxAge != nil { + l = len(*m.MaxAge) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ImagePullSecrets) > 0 { + for _, e := range m.ImagePullSecrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ServiceAccountName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PriorityClassName) + n += 1 + l + sovGenerated(uint64(l)) + if m.Priority != nil { + n += 1 + sovGenerated(uint64(*m.Priority)) + } + if m.Affinity != nil { + l = m.Affinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MaxMsgs != nil { + n += 2 + sovGenerated(uint64(*m.MaxMsgs)) + } + if m.MaxBytes != nil { + l = len(*m.MaxBytes) + n += 2 + l + sovGenerated(uint64(l)) + } + if m.MaxSubs != nil { + n += 2 + sovGenerated(uint64(*m.MaxSubs)) + } + if m.MaxPayload != nil { + l = len(*m.MaxPayload) + n += 2 + l + sovGenerated(uint64(l)) + } + if m.RaftHeartbeatTimeout != nil { + l = len(*m.RaftHeartbeatTimeout) + n += 2 + l + sovGenerated(uint64(l)) + } + if m.RaftElectionTimeout != nil { + l = len(*m.RaftElectionTimeout) + n += 2 + l + sovGenerated(uint64(l)) + } + if m.RaftLeaseTimeout != nil { + l = len(*m.RaftLeaseTimeout) + n += 2 + l + sovGenerated(uint64(l)) + } + if m.RaftCommitTimeout != nil { + l = len(*m.RaftCommitTimeout) + n += 2 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PersistenceStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StorageClassName != nil { + l = len(*m.StorageClassName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AccessMode != nil { + l = len(*m.AccessMode) + n 
+= 1 + l + sovGenerated(uint64(l)) + } + if m.VolumeSize != nil { + l = m.VolumeSize.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *BusConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BusConfig{`, + `NATS:` + strings.Replace(this.NATS.String(), "NATSConfig", "NATSConfig", 1) + `,`, + `JetStream:` + strings.Replace(this.JetStream.String(), "JetStreamConfig", "JetStreamConfig", 1) + `,`, + `Kafka:` + strings.Replace(this.Kafka.String(), "KafkaBus", "KafkaBus", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerTemplate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerTemplate{`, + `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "ResourceRequirements", "v1.ResourceRequirements", 1), `&`, ``, 1) + `,`, + `ImagePullPolicy:` + fmt.Sprintf("%v", this.ImagePullPolicy) + `,`, + `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "SecurityContext", "v1.SecurityContext", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EventBus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventBus{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "EventBusSpec", "EventBusSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "EventBusStatus", "EventBusStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *EventBusList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]EventBus{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "EventBus", "EventBus", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&EventBusList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *EventBusSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventBusSpec{`, + `NATS:` + strings.Replace(this.NATS.String(), "NATSBus", "NATSBus", 1) + `,`, + `JetStream:` + strings.Replace(this.JetStream.String(), "JetStreamBus", "JetStreamBus", 1) + `,`, + `Kafka:` + strings.Replace(this.Kafka.String(), "KafkaBus", "KafkaBus", 1) + `,`, + `JetStreamExotic:` + strings.Replace(this.JetStreamExotic.String(), "JetStreamConfig", "JetStreamConfig", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EventBusStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventBusStatus{`, + `Status:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "common.Status", 1), `&`, ``, 1) + `,`, + `Config:` + strings.Replace(strings.Replace(this.Config.String(), "BusConfig", "BusConfig", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *JetStreamBus) String() string { + if this == nil { + return "nil" + } + repeatedStringForTolerations := "[]Toleration{" + for _, f := range this.Tolerations { + 
repeatedStringForTolerations += fmt.Sprintf("%v", f) + "," + } + repeatedStringForTolerations += "}" + repeatedStringForImagePullSecrets := "[]LocalObjectReference{" + for _, f := range this.ImagePullSecrets { + repeatedStringForImagePullSecrets += fmt.Sprintf("%v", f) + "," + } + repeatedStringForImagePullSecrets += "}" + keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) + for k := range this.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + mapStringForNodeSelector := "map[string]string{" + for _, k := range keysForNodeSelector { + mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + } + mapStringForNodeSelector += "}" + s := strings.Join([]string{`&JetStreamBus{`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `ContainerTemplate:` + strings.Replace(this.ContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, + `ReloaderContainerTemplate:` + strings.Replace(this.ReloaderContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, + `MetricsContainerTemplate:` + strings.Replace(this.MetricsContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, + `Persistence:` + strings.Replace(this.Persistence.String(), "PersistenceStrategy", "PersistenceStrategy", 1) + `,`, + `Metadata:` + strings.Replace(fmt.Sprintf("%v", this.Metadata), "Metadata", "common.Metadata", 1) + `,`, + `NodeSelector:` + mapStringForNodeSelector + `,`, + `Tolerations:` + repeatedStringForTolerations + `,`, + `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "PodSecurityContext", "v1.PodSecurityContext", 1) + `,`, + `ImagePullSecrets:` + repeatedStringForImagePullSecrets + `,`, + `PriorityClassName:` + fmt.Sprintf("%v", this.PriorityClassName) + `,`, + `Priority:` + valueToStringGenerated(this.Priority) + `,`, + `Affinity:` + strings.Replace(fmt.Sprintf("%v", this.Affinity), "Affinity", "v1.Affinity", 1) + `,`, + `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`, + `Settings:` + valueToStringGenerated(this.Settings) + `,`, + `StartArgs:` + fmt.Sprintf("%v", this.StartArgs) + `,`, + `StreamConfig:` + valueToStringGenerated(this.StreamConfig) + `,`, + `MaxPayload:` + valueToStringGenerated(this.MaxPayload) + `,`, + `}`, + }, "") + return s +} +func (this *JetStreamConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JetStreamConfig{`, + `URL:` + fmt.Sprintf("%v", this.URL) + `,`, + `AccessSecret:` + strings.Replace(fmt.Sprintf("%v", this.AccessSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `StreamConfig:` + fmt.Sprintf("%v", this.StreamConfig) + `,`, + `}`, + }, "") + return s +} +func (this *KafkaBus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KafkaBus{`, + `URL:` + fmt.Sprintf("%v", this.URL) + `,`, + `Topic:` + fmt.Sprintf("%v", this.Topic) + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `TLS:` + strings.Replace(fmt.Sprintf("%v", this.TLS), "TLSConfig", "common.TLSConfig", 1) + `,`, + `SASL:` + strings.Replace(fmt.Sprintf("%v", this.SASL), "SASLConfig", "common.SASLConfig", 1) + `,`, + `ConsumerGroup:` + strings.Replace(this.ConsumerGroup.String(), "KafkaConsumerGroup", "KafkaConsumerGroup", 1) + `,`, + `}`, + }, "") + return s +} +func (this *KafkaConsumerGroup) String() string { + if this == 
nil { + return "nil" + } + s := strings.Join([]string{`&KafkaConsumerGroup{`, + `GroupName:` + fmt.Sprintf("%v", this.GroupName) + `,`, + `RebalanceStrategy:` + fmt.Sprintf("%v", this.RebalanceStrategy) + `,`, + `StartOldest:` + fmt.Sprintf("%v", this.StartOldest) + `,`, + `}`, + }, "") + return s +} +func (this *NATSBus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NATSBus{`, + `Native:` + strings.Replace(this.Native.String(), "NativeStrategy", "NativeStrategy", 1) + `,`, + `Exotic:` + strings.Replace(this.Exotic.String(), "NATSConfig", "NATSConfig", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NATSConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NATSConfig{`, + `URL:` + fmt.Sprintf("%v", this.URL) + `,`, + `ClusterID:` + valueToStringGenerated(this.ClusterID) + `,`, + `Auth:` + valueToStringGenerated(this.Auth) + `,`, + `AccessSecret:` + strings.Replace(fmt.Sprintf("%v", this.AccessSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NativeStrategy) String() string { + if this == nil { + return "nil" + } + repeatedStringForTolerations := "[]Toleration{" + for _, f := range this.Tolerations { + repeatedStringForTolerations += fmt.Sprintf("%v", f) + "," + } + repeatedStringForTolerations += "}" + repeatedStringForImagePullSecrets := "[]LocalObjectReference{" + for _, f := range this.ImagePullSecrets { + repeatedStringForImagePullSecrets += fmt.Sprintf("%v", f) + "," + } + repeatedStringForImagePullSecrets += "}" + keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) + for k := range this.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + mapStringForNodeSelector := "map[string]string{" + for _, k := range keysForNodeSelector { + mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + } + mapStringForNodeSelector += "}" + s := strings.Join([]string{`&NativeStrategy{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `Auth:` + valueToStringGenerated(this.Auth) + `,`, + `Persistence:` + strings.Replace(this.Persistence.String(), "PersistenceStrategy", "PersistenceStrategy", 1) + `,`, + `ContainerTemplate:` + strings.Replace(this.ContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, + `MetricsContainerTemplate:` + strings.Replace(this.MetricsContainerTemplate.String(), "ContainerTemplate", "ContainerTemplate", 1) + `,`, + `NodeSelector:` + mapStringForNodeSelector + `,`, + `Tolerations:` + repeatedStringForTolerations + `,`, + `Metadata:` + strings.Replace(fmt.Sprintf("%v", this.Metadata), "Metadata", "common.Metadata", 1) + `,`, + `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "PodSecurityContext", "v1.PodSecurityContext", 1) + `,`, + `MaxAge:` + valueToStringGenerated(this.MaxAge) + `,`, + `ImagePullSecrets:` + repeatedStringForImagePullSecrets + `,`, + `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`, + `PriorityClassName:` + fmt.Sprintf("%v", this.PriorityClassName) + `,`, + `Priority:` + valueToStringGenerated(this.Priority) + `,`, + `Affinity:` + strings.Replace(fmt.Sprintf("%v", this.Affinity), "Affinity", "v1.Affinity", 1) + `,`, + `MaxMsgs:` + valueToStringGenerated(this.MaxMsgs) + `,`, + `MaxBytes:` + valueToStringGenerated(this.MaxBytes) + `,`, + `MaxSubs:` + valueToStringGenerated(this.MaxSubs) + `,`, + `MaxPayload:` + 
valueToStringGenerated(this.MaxPayload) + `,`, + `RaftHeartbeatTimeout:` + valueToStringGenerated(this.RaftHeartbeatTimeout) + `,`, + `RaftElectionTimeout:` + valueToStringGenerated(this.RaftElectionTimeout) + `,`, + `RaftLeaseTimeout:` + valueToStringGenerated(this.RaftLeaseTimeout) + `,`, + `RaftCommitTimeout:` + valueToStringGenerated(this.RaftCommitTimeout) + `,`, + `}`, + }, "") + return s +} +func (this *PersistenceStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistenceStrategy{`, + `StorageClassName:` + valueToStringGenerated(this.StorageClassName) + `,`, + `AccessMode:` + valueToStringGenerated(this.AccessMode) + `,`, + `VolumeSize:` + strings.Replace(fmt.Sprintf("%v", this.VolumeSize), "Quantity", "resource.Quantity", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *BusConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BusConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BusConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NATS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NATS == nil { + m.NATS = &NATSConfig{} + } + if err := m.NATS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JetStream", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.JetStream == nil { + m.JetStream = &JetStreamConfig{} + } + if err := m.JetStream.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kafka", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + 
return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Kafka == nil { + m.Kafka = &KafkaBus{} + } + if err := m.Kafka.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerTemplate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerTemplate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImagePullPolicy = k8s_io_api_core_v1.PullPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecurityContext == nil { + m.SecurityContext = &v1.SecurityContext{} + } + if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventBus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventBus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventBus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventBusList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventBusList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventBusList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, EventBus{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventBusSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventBusSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventBusSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NATS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NATS == nil { + 
m.NATS = &NATSBus{} + } + if err := m.NATS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JetStream", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.JetStream == nil { + m.JetStream = &JetStreamBus{} + } + if err := m.JetStream.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kafka", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Kafka == nil { + m.Kafka = &KafkaBus{} + } + if err := m.Kafka.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JetStreamExotic", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.JetStreamExotic == nil { + m.JetStreamExotic = &JetStreamConfig{} + } + if err := m.JetStreamExotic.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventBusStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventBusStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventBusStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JetStreamBus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JetStreamBus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JetStreamBus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerTemplate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ContainerTemplate == nil { + m.ContainerTemplate = &ContainerTemplate{} + } + if err := m.ContainerTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReloaderContainerTemplate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ReloaderContainerTemplate == nil { + m.ReloaderContainerTemplate = &ContainerTemplate{} + } + if err := m.ReloaderContainerTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricsContainerTemplate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MetricsContainerTemplate == nil { + m.MetricsContainerTemplate = &ContainerTemplate{} + } + if err := m.MetricsContainerTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Persistence", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Persistence == nil { + m.Persistence = &PersistenceStrategy{} + } + if err := m.Persistence.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = &common.Metadata{} + } + if err := 
m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.NodeSelector[mapkey] = mapvalue + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tolerations = append(m.Tolerations, v1.Toleration{}) + if err := 
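The NodeSelector case that follows shows how protobuf maps travel on the wire: each entry is its own length-delimited message whose field 1 is the key and field 2 is the value, which is why the generated code runs a nested tag loop per entry. A hedged sketch that hand-encodes one such entry (single-byte tags and lengths are assumed for brevity; real encoders emit varints):

```go
package main

import "fmt"

// appendString writes one length-delimited string field: tag byte,
// length byte, payload. Assumes field numbers < 16 and lengths < 128.
func appendString(buf []byte, fieldNum int, s string) []byte {
	buf = append(buf, byte(fieldNum<<3|2)) // wire type 2: length-delimited
	buf = append(buf, byte(len(s)))
	return append(buf, s...)
}

func main() {
	// One nodeSelector entry {"disktype": "ssd"}: an embedded message
	// with the key as field 1 and the value as field 2...
	entry := appendString(nil, 1, "disktype")
	entry = appendString(entry, 2, "ssd")

	// ...wrapped as field 8 of JetStreamBus (map<string, string> nodeSelector = 8).
	msg := append([]byte{8<<3 | 2, byte(len(entry))}, entry...)
	fmt.Printf("% x\n", msg)
}
```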
m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecurityContext == nil { + m.SecurityContext = &v1.PodSecurityContext{} + } + if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullSecrets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImagePullSecrets = append(m.ImagePullSecrets, v1.LocalObjectReference{}) + if err := m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PriorityClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PriorityClassName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Priority = &v + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Affinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Affinity == nil { + m.Affinity = &v1.Affinity{} + } + if err := m.Affinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceAccountName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Settings", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Settings = &s + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartArgs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StartArgs = append(m.StartArgs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StreamConfig", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.StreamConfig = &s + iNdEx = postIndex + case 19: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NATS", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxPayload", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1478,27 +3995,24 @@ func (m *BusConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return 
ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.NATS == nil { - m.NATS = &NATSConfig{} - } - if err := m.NATS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.MaxPayload = &s iNdEx = postIndex default: iNdEx = preIndex @@ -1521,7 +4035,7 @@ func (m *BusConfig) Unmarshal(dAtA []byte) error { } return nil } -func (m *ContainerTemplate) Unmarshal(dAtA []byte) error { +func (m *JetStreamConfig) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1544,15 +4058,47 @@ func (m *ContainerTemplate) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ContainerTemplate: wiretype end group for non-group") + return fmt.Errorf("proto: JetStreamConfig: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ContainerTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: JetStreamConfig: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.URL = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AccessSecret", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1579,10 +4125,45 @@ func (m *ContainerTemplate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.AccessSecret == nil { + m.AccessSecret = &v1.SecretKeySelector{} + } + if err := m.AccessSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StreamConfig", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StreamConfig = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1604,7 +4185,7 @@ func (m *ContainerTemplate) Unmarshal(dAtA []byte) error { } return nil } -func (m *EventBus) Unmarshal(dAtA []byte) error { +func (m *KafkaBus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1627,17 +4208,17 @@ func (m *EventBus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire 
& 0x7) if wireType == 4 { - return fmt.Errorf("proto: EventBus: wiretype end group for non-group") + return fmt.Errorf("proto: KafkaBus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EventBus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: KafkaBus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1647,30 +4228,29 @@ func (m *EventBus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.URL = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Topic", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1680,30 +4260,29 @@ func (m *EventBus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Topic = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1713,78 +4292,27 @@ func (m *EventBus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Version = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EventBusList) Unmarshal(dAtA 
[]byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EventBusList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EventBusList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1811,13 +4339,16 @@ func (m *EventBusList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.TLS == nil { + m.TLS = &common.TLSConfig{} + } + if err := m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SASL", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1844,64 +4375,16 @@ func (m *EventBusList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, EventBus{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if m.SASL == nil { + m.SASL = &common.SASLConfig{} } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { + if err := m.SASL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EventBusSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EventBusSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EventBusSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NATS", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConsumerGroup", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -1928,10 +4411,10 @@ func (m *EventBusSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.NATS == nil { - m.NATS = &NATSBus{} + if m.ConsumerGroup == nil { + m.ConsumerGroup = &KafkaConsumerGroup{} } - if err := m.NATS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ConsumerGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != 
nil { return err } iNdEx = postIndex @@ -1956,7 +4439,7 @@ func (m *EventBusSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *EventBusStatus) Unmarshal(dAtA []byte) error { +func (m *KafkaConsumerGroup) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1979,17 +4462,17 @@ func (m *EventBusStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EventBusStatus: wiretype end group for non-group") + return fmt.Errorf("proto: KafkaConsumerGroup: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EventBusStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: KafkaConsumerGroup: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field GroupName", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1999,30 +4482,29 @@ func (m *EventBusStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.GroupName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RebalanceStrategy", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2032,25 +4514,44 @@ func (m *EventBusStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.RebalanceStrategy = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartOldest", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.StartOldest = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2460,26 +4961,6 @@ func (m *NativeStrategy) Unmarshal(dAtA []byte) error { m.Auth = &s iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedAntiAffinity", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l 
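The NativeStrategy hunk below deletes the deprecated antiAffinity field (tag 3) and renumbers every later field down by one. That is presumably safe here only because CRD objects are stored and served as JSON, so these protobuf tags never reach a persisted wire format; in a plain protobuf API, reusing a tag silently changes the meaning of old payloads, as this small sketch illustrates:

```go
package main

import "fmt"

func main() {
	// Under the old schema: antiAffinity = true, encoded as tag (3<<3|0)
	// followed by the varint value 1.
	old := []byte{3<<3 | 0, 1}

	tag := old[0]
	fmt.Println("field:", tag>>3, "wireType:", tag&0x7) // field 3, wire type 0

	// Under the new schema, field 3 is persistence, a length-delimited
	// message (wire type 2). A strict decoder rejects the payload above;
	// a lenient one skips it -- either way the old value is lost, which
	// is why proto style guides say to reserve removed tags instead.
}
```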
{ - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.DeprecatedAntiAffinity = bool(v != 0) - case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Persistence", wireType) } @@ -2515,7 +4996,7 @@ func (m *NativeStrategy) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 5: + case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ContainerTemplate", wireType) } @@ -2551,7 +5032,7 @@ func (m *NativeStrategy) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 6: + case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field MetricsContainerTemplate", wireType) } @@ -2587,7 +5068,7 @@ func (m *NativeStrategy) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 7: + case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) } @@ -2714,7 +5195,7 @@ func (m *NativeStrategy) Unmarshal(dAtA []byte) error { } m.NodeSelector[mapkey] = mapvalue iNdEx = postIndex - case 8: + case 7: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) } @@ -2748,7 +5229,7 @@ func (m *NativeStrategy) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 9: + case 8: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } @@ -2784,7 +5265,7 @@ func (m *NativeStrategy) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 10: + case 9: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) } @@ -2820,7 +5301,7 @@ func (m *NativeStrategy) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 11: + case 10: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field MaxAge", wireType) } @@ -2853,7 +5334,7 @@ func (m *NativeStrategy) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.MaxAge = &s iNdEx = postIndex - case 12: + case 11: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ImagePullSecrets", wireType) } @@ -2887,7 +5368,7 @@ func (m *NativeStrategy) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 13: + case 12: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountName", wireType) } @@ -2919,7 +5400,7 @@ func (m *NativeStrategy) Unmarshal(dAtA []byte) error { } m.ServiceAccountName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 14: + case 13: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field PriorityClassName", wireType) } @@ -2951,7 +5432,7 @@ func (m *NativeStrategy) Unmarshal(dAtA []byte) error { } m.PriorityClassName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 15: + case 14: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) } @@ -2971,7 +5452,7 @@ func (m *NativeStrategy) Unmarshal(dAtA []byte) error { } } m.Priority = &v - case 16: + case 15: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Affinity", wireType) } @@ -3007,7 +5488,7 @@ func (m *NativeStrategy) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 17: + case 16: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field MaxMsgs", wireType) } @@ -3027,7 +5508,7 @@ func (m *NativeStrategy) Unmarshal(dAtA []byte) error { } } m.MaxMsgs = &v - case 18: + case 
17: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field MaxBytes", wireType) } @@ -3060,6 +5541,191 @@ func (m *NativeStrategy) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.MaxBytes = &s iNdEx = postIndex + case 18: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxSubs", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MaxSubs = &v + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPayload", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.MaxPayload = &s + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftHeartbeatTimeout", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.RaftHeartbeatTimeout = &s + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftElectionTimeout", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.RaftElectionTimeout = &s + iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftLeaseTimeout", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.RaftLeaseTimeout = &s + iNdEx = postIndex + case 23: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field RaftCommitTimeout", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.RaftCommitTimeout = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/pkg/apis/eventbus/v1alpha1/generated.proto b/pkg/apis/eventbus/v1alpha1/generated.proto index 6b817ec563..d05cb3d67a 100644 --- a/pkg/apis/eventbus/v1alpha1/generated.proto +++ b/pkg/apis/eventbus/v1alpha1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,7 +16,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package github.com.argoproj.argo_events.pkg.apis.eventbus.v1alpha1; @@ -28,16 +28,27 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1alpha1"; +option go_package = "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1"; // BusConfig has the finalized configuration for EventBus message BusConfig { + // +optional optional NATSConfig nats = 1; + + // +optional + optional JetStreamConfig jetstream = 2; + + // +optional + optional KafkaBus kafka = 3; } // ContainerTemplate defines customized spec for a container message ContainerTemplate { optional k8s.io.api.core.v1.ResourceRequirements resources = 1; + + optional string imagePullPolicy = 2; + + optional k8s.io.api.core.v1.SecurityContext securityContext = 3; } // EventBus is the definition of a eventbus resource @@ -66,7 +77,19 @@ message EventBusList { // EventBusSpec refers to specification of eventbus resource message EventBusSpec { // NATS eventbus + // +optional optional NATSBus nats = 1; + + // +optional + optional JetStreamBus jetstream = 2; + + // +optional + // Kafka eventbus + optional KafkaBus kafka = 3; + + // Exotic JetStream + // +optional + optional JetStreamConfig jetstreamExotic = 4; } // EventBusStatus holds the status of the eventbus resource @@ -77,6 +100,158 @@ message EventBusStatus { optional BusConfig config = 2; } +// JetStreamBus holds the JetStream EventBus information +message JetStreamBus { + // JetStream version, such as "2.7.3" + optional string version = 1; + + // JetStream StatefulSet size + // +kubebuilder:default=3 + optional int32 replicas = 2; + + // ContainerTemplate contains customized spec for Nats JetStream container + // +optional + optional ContainerTemplate containerTemplate = 3; + + // ReloaderContainerTemplate contains customized spec for config reloader container + // +optional + optional ContainerTemplate reloaderContainerTemplate = 4; + + // MetricsContainerTemplate contains customized spec for metrics container + // +optional + optional ContainerTemplate metricsContainerTemplate = 5; + + // 
+optional + optional PersistenceStrategy persistence = 6; + + // Metadata sets the pods's metadata, i.e. annotations and labels + optional github.com.argoproj.argo_events.pkg.apis.common.Metadata metadata = 7; + + // NodeSelector is a selector which must be true for the pod to fit on a node. + // Selector which must match a node's labels for the pod to be scheduled on that node. + // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + // +optional + map<string, string> nodeSelector = 8; + + // If specified, the pod's tolerations. + // +optional + repeated k8s.io.api.core.v1.Toleration tolerations = 9; + + // SecurityContext holds pod-level security attributes and common container settings. + // Optional: Defaults to empty. See type description for default values of each field. + // +optional + optional k8s.io.api.core.v1.PodSecurityContext securityContext = 10; + + // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + // If specified, these secrets will be passed to individual puller implementations for them to use. For example, + // in the case of docker, only DockerConfig type secrets are honored. + // More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated k8s.io.api.core.v1.LocalObjectReference imagePullSecrets = 11; + + // If specified, indicates the Redis pod's priority. "system-node-critical" + // and "system-cluster-critical" are two special keywords which indicate the + // highest priorities with the former being the highest priority. Any other + // name must be defined by creating a PriorityClass object with that name. + // If not specified, the pod priority will be default or zero if there is no + // default. + // More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + // +optional + optional string priorityClassName = 12; + + // The priority value. Various system components use this field to find the + // priority of the Redis pod. When Priority Admission Controller is enabled, + // it prevents users from setting this field. The admission controller populates + // this field from PriorityClassName. + // The higher the value, the higher the priority. + // More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + // +optional + optional int32 priority = 13; + + // The pod's scheduling constraints + // More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + // +optional + optional k8s.io.api.core.v1.Affinity affinity = 14; + + // ServiceAccountName to apply to the StatefulSet + // +optional + optional string serviceAccountName = 15; + + // JetStream configuration, if not specified, global settings in controller-config will be used. + // See https://docs.nats.io/running-a-nats-service/configuration#jetstream. + // Only configure "max_memory_store" or "max_file_store", do not set "store_dir" as it has been hardcoded. + // +optional + optional string settings = 16; + + // Optional arguments to start nats-server. For example, "-D" to enable debugging output, "-DV" to enable debugging and tracing. + // Check https://docs.nats.io/ for all the available arguments. 
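The JetStreamBus proto fields above mirror the Go struct added later in this diff (pkg/apis/eventbus/v1alpha1/jetstream_eventbus.go). A hedged sketch of building one programmatically, using only names declared in this diff; it assumes the github.com/argoproj/argo-events module resolves as pinned by the repo's go.mod, and the stream-config keys follow the YAML format the comment above describes:

```go
package main

import (
	"fmt"

	"github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1"
)

func main() {
	replicas := int32(5)
	// Merged with the default stream configuration in controller-config.
	streamConfig := "maxMsgs: 50000\nmaxAge: 72h"

	js := v1alpha1.JetStreamBus{
		Version:      "2.7.3",
		Replicas:     &replicas,
		StartArgs:    []string{"-D"}, // extra nats-server flags, per the comment above
		StreamConfig: &streamConfig,
	}
	// GetReplicas (defined in this diff) falls back to 3 when Replicas is nil.
	fmt.Println(js.GetReplicas()) // 5
}
```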
+ // +optional + repeated string startArgs = 17; + + // Optional configuration for the streams to be created in this JetStream service, if specified, it will be merged with the default configuration in controller-config. + // It accepts a YAML format configuration, available fields include, "maxBytes", "maxMsgs", "maxAge" (e.g. 72h), "replicas" (1, 3, 5), "duplicates" (e.g. 5m). + // +optional + optional string streamConfig = 18; + + // Maximum number of bytes in a message payload, 0 means unlimited. Defaults to 1MB + // +optional + optional string maxPayload = 19; +} + +message JetStreamConfig { + // JetStream (Nats) URL + optional string url = 1; + + // Secret for auth + // +optional + optional k8s.io.api.core.v1.SecretKeySelector accessSecret = 2; + + // +optional + optional string streamConfig = 3; +} + +// KafkaBus holds the KafkaBus EventBus information +message KafkaBus { + // URL to kafka cluster, multiple URLs separated by comma + optional string url = 1; + + // Topic name, defaults to {namespace_name}-{eventbus_name} + // +optional + optional string topic = 2; + + // Kafka version, sarama defaults to the oldest supported stable version + // +optional + optional string version = 3; + + // TLS configuration for the kafka client. + // +optional + optional github.com.argoproj.argo_events.pkg.apis.common.TLSConfig tls = 4; + + // SASL configuration for the kafka client + // +optional + optional github.com.argoproj.argo_events.pkg.apis.common.SASLConfig sasl = 5; + + // Consumer group for kafka client + // +optional + optional KafkaConsumerGroup consumerGroup = 6; +} + +message KafkaConsumerGroup { + // Consumer group name, defaults to {namespace_name}-{sensor_name} + // +optional + optional string groupName = 1; + + // Rebalance strategy can be one of: sticky, roundrobin, range. Range is the default. + // +optional + optional string rebalanceStrategy = 2; + + // When starting up a new group do we want to start from the oldest event (true) or the newest event (false), defaults to false + // +optional + optional bool startOldest = 3; +} + // NATSBus holds the NATS eventbus information message NATSBus { // Native means to bring up a native NATS service @@ -110,41 +285,38 @@ message NativeStrategy { optional string auth = 2; - // Deprecated, use Affinity instead, will be removed in v1.5 - optional bool antiAffinity = 3; - // +optional - optional PersistenceStrategy persistence = 4; + optional PersistenceStrategy persistence = 3; // ContainerTemplate contains customized spec for NATS container // +optional - optional ContainerTemplate containerTemplate = 5; + optional ContainerTemplate containerTemplate = 4; // MetricsContainerTemplate contains customized spec for metrics container // +optional - optional ContainerTemplate metricsContainerTemplate = 6; + optional ContainerTemplate metricsContainerTemplate = 5; // NodeSelector is a selector which must be true for the pod to fit on a node. // Selector which must match a node's labels for the pod to be scheduled on that node. // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ // +optional - map<string, string> nodeSelector = 7; + map<string, string> nodeSelector = 6; // If specified, the pod's tolerations. // +optional - repeated k8s.io.api.core.v1.Toleration tolerations = 8; + repeated k8s.io.api.core.v1.Toleration tolerations = 7; // Metadata sets the pods's metadata, i.e. 
annotations and labels - optional github.com.argoproj.argo_events.pkg.apis.common.Metadata metadata = 9; + optional github.com.argoproj.argo_events.pkg.apis.common.Metadata metadata = 8; // SecurityContext holds pod-level security attributes and common container settings. // Optional: Defaults to empty. See type description for default values of each field. // +optional - optional k8s.io.api.core.v1.PodSecurityContext securityContext = 10; + optional k8s.io.api.core.v1.PodSecurityContext securityContext = 9; // Max Age of existing messages, i.e. "72h", “4h35m” // +optional - optional string maxAge = 11; + optional string maxAge = 10; // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. // If specified, these secrets will be passed to individual puller implementations for them to use. For example, @@ -153,11 +325,11 @@ message NativeStrategy { // +optional // +patchMergeKey=name // +patchStrategy=merge - repeated k8s.io.api.core.v1.LocalObjectReference imagePullSecrets = 12; + repeated k8s.io.api.core.v1.LocalObjectReference imagePullSecrets = 11; // ServiceAccountName to apply to NATS StatefulSet // +optional - optional string serviceAccountName = 13; + optional string serviceAccountName = 12; // If specified, indicates the EventSource pod's priority. "system-node-critical" // and "system-cluster-critical" are two special keywords which indicate the @@ -167,7 +339,7 @@ message NativeStrategy { // default. // More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ // +optional - optional string priorityClassName = 14; + optional string priorityClassName = 13; // The priority value. Various system components use this field to find the // priority of the EventSource pod. When Priority Admission Controller is enabled, @@ -176,18 +348,36 @@ message NativeStrategy { // The higher the value, the higher the priority. // More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ // +optional - optional int32 priority = 15; + optional int32 priority = 14; // The pod's scheduling constraints // More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ // +optional - optional k8s.io.api.core.v1.Affinity affinity = 16; + optional k8s.io.api.core.v1.Affinity affinity = 15; // Maximum number of messages per channel, 0 means unlimited. Defaults to 1000000 - optional uint64 maxMsgs = 17; + optional uint64 maxMsgs = 16; // Total size of messages per channel, 0 means unlimited. Defaults to 1GB - optional string maxBytes = 18; + optional string maxBytes = 17; + + // Maximum number of subscriptions per channel, 0 means unlimited. Defaults to 1000 + optional uint64 maxSubs = 18; + + // Maximum number of bytes in a message payload, 0 means unlimited. Defaults to 1MB + optional string maxPayload = 19; + + // Specifies the time in follower state without a leader before attempting an election, i.e. "72h", “4h35m”. Defaults to 2s + optional string raftHeartbeatTimeout = 20; + + // Specifies the time in candidate state without a leader before attempting an election, i.e. "72h", “4h35m”. Defaults to 2s + optional string raftElectionTimeout = 21; + + // Specifies how long a leader waits without being able to contact a quorum of nodes before stepping down as leader, i.e. "72h", “4h35m”. 
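The examples quoted in these comments ("72h", "4h35m", "2s", "100ms") are Go duration strings; presumably the controller parses them with time.ParseDuration, so any syntax that function accepts should work:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// The duration formats quoted in the maxAge and raft timeout comments.
	for _, s := range []string{"72h", "4h35m", "2s", "100ms"} {
		d, err := time.ParseDuration(s)
		if err != nil {
			panic(err)
		}
		fmt.Println(s, "=", d)
	}
}
```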
Defaults to 1s + optional string raftLeaseTimeout = 22; + + // Specifies the time without an Apply() operation before sending an heartbeat to ensure timely commit, i.e. "72h", “4h35m”. Defaults to 100ms + optional string raftCommitTimeout = 23; } // PersistenceStrategy defines the strategy of persistence diff --git a/pkg/apis/eventbus/v1alpha1/jetstream_eventbus.go b/pkg/apis/eventbus/v1alpha1/jetstream_eventbus.go new file mode 100644 index 0000000000..97cad48fe2 --- /dev/null +++ b/pkg/apis/eventbus/v1alpha1/jetstream_eventbus.go @@ -0,0 +1,105 @@ +package v1alpha1 + +import ( + "github.com/argoproj/argo-events/pkg/apis/common" + corev1 "k8s.io/api/core/v1" +) + +// JetStreamBus holds the JetStream EventBus information +type JetStreamBus struct { + // JetStream version, such as "2.7.3" + Version string `json:"version,omitempty" protobuf:"bytes,1,opt,name=version"` + // JetStream StatefulSet size + // +kubebuilder:default=3 + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"` + // ContainerTemplate contains customized spec for Nats JetStream container + // +optional + ContainerTemplate *ContainerTemplate `json:"containerTemplate,omitempty" protobuf:"bytes,3,opt,name=containerTemplate"` + // ReloaderContainerTemplate contains customized spec for config reloader container + // +optional + ReloaderContainerTemplate *ContainerTemplate `json:"reloaderContainerTemplate,omitempty" protobuf:"bytes,4,opt,name=reloaderContainerTemplate"` + // MetricsContainerTemplate contains customized spec for metrics container + // +optional + MetricsContainerTemplate *ContainerTemplate `json:"metricsContainerTemplate,omitempty" protobuf:"bytes,5,opt,name=metricsContainerTemplate"` + // +optional + Persistence *PersistenceStrategy `json:"persistence,omitempty" protobuf:"bytes,6,opt,name=persistence"` + // Metadata sets the pods's metadata, i.e. annotations and labels + Metadata *common.Metadata `json:"metadata,omitempty" protobuf:"bytes,7,opt,name=metadata"` + // NodeSelector is a selector which must be true for the pod to fit on a node. + // Selector which must match a node's labels for the pod to be scheduled on that node. + // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,8,rep,name=nodeSelector"` + // If specified, the pod's tolerations. + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty" protobuf:"bytes,9,rep,name=tolerations"` + // SecurityContext holds pod-level security attributes and common container settings. + // Optional: Defaults to empty. See type description for default values of each field. + // +optional + SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,10,opt,name=securityContext"` + // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + // If specified, these secrets will be passed to individual puller implementations for them to use. For example, + // in the case of docker, only DockerConfig type secrets are honored. 
+ // More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,11,rep,name=imagePullSecrets"` + // If specified, indicates the JetStream pod's priority. "system-node-critical" + // and "system-cluster-critical" are two special keywords which indicate the + // highest priorities with the former being the highest priority. Any other + // name must be defined by creating a PriorityClass object with that name. + // If not specified, the pod priority will be default or zero if there is no + // default. + // More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + // +optional + PriorityClassName string `json:"priorityClassName,omitempty" protobuf:"bytes,12,opt,name=priorityClassName"` + // The priority value. Various system components use this field to find the + // priority of the JetStream pod. When Priority Admission Controller is enabled, + // it prevents users from setting this field. The admission controller populates + // this field from PriorityClassName. + // The higher the value, the higher the priority. + // More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + // +optional + Priority *int32 `json:"priority,omitempty" protobuf:"bytes,13,opt,name=priority"` + // The pod's scheduling constraints + // More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + // +optional + Affinity *corev1.Affinity `json:"affinity,omitempty" protobuf:"bytes,14,opt,name=affinity"` + // ServiceAccountName to apply to the StatefulSet + // +optional + ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,15,opt,name=serviceAccountName"` + // JetStream configuration, if not specified, global settings in controller-config will be used. + // See https://docs.nats.io/running-a-nats-service/configuration#jetstream. + // Only configure "max_memory_store" or "max_file_store", do not set "store_dir" as it has been hardcoded. + // +optional + Settings *string `json:"settings,omitempty" protobuf:"bytes,16,opt,name=settings"` + // Optional arguments to start nats-server. For example, "-D" to enable debugging output, "-DV" to enable debugging and tracing. + // Check https://docs.nats.io/ for all the available arguments. + // +optional + StartArgs []string `json:"startArgs,omitempty" protobuf:"bytes,17,rep,name=startArgs"` + // Optional configuration for the streams to be created in this JetStream service, if specified, it will be merged with the default configuration in controller-config. + // It accepts a YAML format configuration, available fields include, "maxBytes", "maxMsgs", "maxAge" (e.g. 72h), "replicas" (1, 3, 5), "duplicates" (e.g. 5m). + // +optional + StreamConfig *string `json:"streamConfig,omitempty" protobuf:"bytes,18,opt,name=streamConfig"` + // Maximum number of bytes in a message payload, 0 means unlimited. 
Defaults to 1MB + // +optional + MaxPayload *string `json:"maxPayload,omitempty" protobuf:"bytes,19,opt,name=maxPayload"` +} + +func (j JetStreamBus) GetReplicas() int { + if j.Replicas == nil { + return 3 + } + return int(*j.Replicas) +} + +type JetStreamConfig struct { + // JetStream (Nats) URL + URL string `json:"url,omitempty" protobuf:"bytes,1,opt,name=url"` + // Secret for auth + // +optional + AccessSecret *corev1.SecretKeySelector `json:"accessSecret,omitempty" protobuf:"bytes,2,opt,name=accessSecret"` + // +optional + StreamConfig string `json:"streamConfig,omitempty" protobuf:"bytes,3,opt,name=streamConfig"` +} diff --git a/pkg/apis/eventbus/v1alpha1/kafka_eventbus.go b/pkg/apis/eventbus/v1alpha1/kafka_eventbus.go new file mode 100644 index 0000000000..fa4f754963 --- /dev/null +++ b/pkg/apis/eventbus/v1alpha1/kafka_eventbus.go @@ -0,0 +1,38 @@ +package v1alpha1 + +import ( + apicommon "github.com/argoproj/argo-events/pkg/apis/common" +) + +// KafkaBus holds the KafkaBus EventBus information +type KafkaBus struct { + // URL to kafka cluster, multiple URLs separated by comma + URL string `json:"url,omitempty" protobuf:"bytes,1,opt,name=url"` + // Topic name, defaults to {namespace_name}-{eventbus_name} + // +optional + Topic string `json:"topic,omitempty" protobuf:"bytes,2,opt,name=topic"` + // Kafka version, sarama defaults to the oldest supported stable version + // +optional + Version string `json:"version,omitempty" protobuf:"bytes,3,opt,name=version"` + // TLS configuration for the kafka client. + // +optional + TLS *apicommon.TLSConfig `json:"tls,omitempty" protobuf:"bytes,4,opt,name=tls"` + // SASL configuration for the kafka client + // +optional + SASL *apicommon.SASLConfig `json:"sasl,omitempty" protobuf:"bytes,5,opt,name=sasl"` + // Consumer group for kafka client + // +optional + ConsumerGroup *KafkaConsumerGroup `json:"consumerGroup,omitempty" protobuf:"bytes,6,opt,name=consumerGroup"` +} + +type KafkaConsumerGroup struct { + // Consumer group name, defaults to {namespace_name}-{sensor_name} + // +optional + GroupName string `json:"groupName,omitempty" protobuf:"bytes,1,opt,name=groupName"` + // Rebalance strategy can be one of: sticky, roundrobin, range. Range is the default. 
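Editor's note: the rebalanceStrategy contract documented above (sticky, roundrobin, range, with range as the fallback) is easy to get wrong in a manifest. Below is a minimal, self-contained Go sketch of how such a value could be validated and defaulted; the helper name is hypothetical and the code is not part of this diff or of argo-events itself. The rebalanceStrategy field declaration continues right after the sketch.

package main

import "fmt"

// normalizeRebalanceStrategy validates a KafkaConsumerGroup.RebalanceStrategy
// value and falls back to "range", the documented default, when it is empty.
func normalizeRebalanceStrategy(s string) (string, error) {
	switch s {
	case "sticky", "roundrobin", "range":
		return s, nil
	case "":
		return "range", nil // documented default
	default:
		return "", fmt.Errorf("unknown rebalance strategy %q", s)
	}
}

func main() {
	for _, s := range []string{"", "sticky", "bogus"} {
		v, err := normalizeRebalanceStrategy(s)
		fmt.Printf("%q -> %q (err: %v)\n", s, v, err)
	}
}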
+ // +optional + RebalanceStrategy string `json:"rebalanceStrategy,omitempty" protobuf:"bytes,2,opt,name=rebalanceStrategy"` + // When starting up a new group do we want to start from the oldest event (true) or the newest event (false), defaults to false + // +optional + StartOldest bool `json:"startOldest,omitempty" default:"false" protobuf:"bytes,3,opt,name=startOldest"` +} diff --git a/pkg/apis/eventbus/v1alpha1/types.go b/pkg/apis/eventbus/v1alpha1/nats_eventbus.go similarity index 51% rename from pkg/apis/eventbus/v1alpha1/types.go rename to pkg/apis/eventbus/v1alpha1/nats_eventbus.go index c8c9799a3e..dd557b8dca 100644 --- a/pkg/apis/eventbus/v1alpha1/types.go +++ b/pkg/apis/eventbus/v1alpha1/nats_eventbus.go @@ -1,49 +1,10 @@ package v1alpha1 import ( - corev1 "k8s.io/api/core/v1" - apiresource "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/argoproj/argo-events/pkg/apis/common" + corev1 "k8s.io/api/core/v1" ) -// EventBus is the definition of a eventbus resource -// +genclient -// +kubebuilder:resource:singular=eventbus,shortName=eb -// +kubebuilder:subresource:status -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:openapi-gen=true -type EventBus struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` - Spec EventBusSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // +optional - Status EventBusStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// EventBusList is the list of eventbus resources -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type EventBusList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` - - Items []EventBus `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// EventBusSpec refers to specification of eventbus resource -type EventBusSpec struct { - // NATS eventbus - NATS *NATSBus `json:"nats,omitempty" protobuf:"bytes,1,opt,name=nats"` -} - -// EventBusStatus holds the status of the eventbus resource -type EventBusStatus struct { - common.Status `json:",inline" protobuf:"bytes,1,opt,name=status"` - // Config holds the fininalized configuration of EventBus - Config BusConfig `json:"config,omitempty" protobuf:"bytes,2,opt,name=config"` -} - // NATSBus holds the NATS eventbus information type NATSBus struct { // Native means to bring up a native NATS service @@ -59,6 +20,7 @@ type AuthStrategy string var ( AuthStrategyNone AuthStrategy = "none" AuthStrategyToken AuthStrategy = "token" + AuthStrategyBasic AuthStrategy = "basic" ) // NativeStrategy indicates to install a native NATS service @@ -66,33 +28,31 @@ type NativeStrategy struct { // Size is the NATS StatefulSet size Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` Auth *AuthStrategy `json:"auth,omitempty" protobuf:"bytes,2,opt,name=auth,casttype=AuthStrategy"` - // Deprecated, use Affinity instead, will be removed in v1.5 - DeprecatedAntiAffinity bool `json:"antiAffinity,omitempty" protobuf:"varint,3,opt,name=antiAffinity"` // +optional - Persistence *PersistenceStrategy `json:"persistence,omitempty" protobuf:"bytes,4,opt,name=persistence"` + Persistence *PersistenceStrategy `json:"persistence,omitempty" protobuf:"bytes,3,opt,name=persistence"` // ContainerTemplate contains customized spec for NATS container // +optional - ContainerTemplate *ContainerTemplate `json:"containerTemplate,omitempty" 
protobuf:"bytes,5,opt,name=containerTemplate"` + ContainerTemplate *ContainerTemplate `json:"containerTemplate,omitempty" protobuf:"bytes,4,opt,name=containerTemplate"` // MetricsContainerTemplate contains customized spec for metrics container // +optional - MetricsContainerTemplate *ContainerTemplate `json:"metricsContainerTemplate,omitempty" protobuf:"bytes,6,opt,name=metricsContainerTemplate"` + MetricsContainerTemplate *ContainerTemplate `json:"metricsContainerTemplate,omitempty" protobuf:"bytes,5,opt,name=metricsContainerTemplate"` // NodeSelector is a selector which must be true for the pod to fit on a node. // Selector which must match a node's labels for the pod to be scheduled on that node. // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ // +optional - NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"` + NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,6,rep,name=nodeSelector"` // If specified, the pod's tolerations. // +optional - Tolerations []corev1.Toleration `json:"tolerations,omitempty" protobuf:"bytes,8,rep,name=tolerations"` + Tolerations []corev1.Toleration `json:"tolerations,omitempty" protobuf:"bytes,7,rep,name=tolerations"` // Metadata sets the pods's metadata, i.e. annotations and labels - Metadata *common.Metadata `json:"metadata,omitempty" protobuf:"bytes,9,opt,name=metadata"` + Metadata *common.Metadata `json:"metadata,omitempty" protobuf:"bytes,8,opt,name=metadata"` // SecurityContext holds pod-level security attributes and common container settings. // Optional: Defaults to empty. See type description for default values of each field. // +optional - SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,10,opt,name=securityContext"` + SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,9,opt,name=securityContext"` // Max Age of existing messages, i.e. "72h", “4h35m” // +optional - MaxAge *string `json:"maxAge,omitempty" protobuf:"bytes,11,opt,name=maxAge"` + MaxAge *string `json:"maxAge,omitempty" protobuf:"bytes,10,opt,name=maxAge"` // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. // If specified, these secrets will be passed to individual puller implementations for them to use. For example, // in the case of docker, only DockerConfig type secrets are honored. @@ -100,10 +60,10 @@ type NativeStrategy struct { // +optional // +patchMergeKey=name // +patchStrategy=merge - ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,12,rep,name=imagePullSecrets"` + ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,11,rep,name=imagePullSecrets"` // ServiceAccountName to apply to NATS StatefulSet // +optional - ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,13,opt,name=serviceAccountName"` + ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,12,opt,name=serviceAccountName"` // If specified, indicates the EventSource pod's priority. "system-node-critical" // and "system-cluster-critical" are two special keywords which indicate the // highest priorities with the former being the highest priority. 
Any other @@ -112,7 +72,7 @@ type NativeStrategy struct { // default. // More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ // +optional - PriorityClassName string `json:"priorityClassName,omitempty" protobuf:"bytes,14,opt,name=priorityClassName"` + PriorityClassName string `json:"priorityClassName,omitempty" protobuf:"bytes,13,opt,name=priorityClassName"` // The priority value. Various system components use this field to find the // priority of the EventSource pod. When Priority Admission Controller is enabled, // it prevents users from setting this field. The admission controller populates @@ -120,20 +80,27 @@ type NativeStrategy struct { // The higher the value, the higher the priority. // More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ // +optional - Priority *int32 `json:"priority,omitempty" protobuf:"bytes,15,opt,name=priority"` + Priority *int32 `json:"priority,omitempty" protobuf:"bytes,14,opt,name=priority"` // The pod's scheduling constraints // More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ // +optional - Affinity *corev1.Affinity `json:"affinity,omitempty" protobuf:"bytes,16,opt,name=affinity"` + Affinity *corev1.Affinity `json:"affinity,omitempty" protobuf:"bytes,15,opt,name=affinity"` // Maximum number of messages per channel, 0 means unlimited. Defaults to 1000000 - MaxMsgs *uint64 `json:"maxMsgs,omitempty" protobuf:"bytes,17,opt,name=maxMsgs"` + MaxMsgs *uint64 `json:"maxMsgs,omitempty" protobuf:"bytes,16,opt,name=maxMsgs"` // Total size of messages per channel, 0 means unlimited. Defaults to 1GB - MaxBytes *string `json:"maxBytes,omitempty" protobuf:"bytes,18,opt,name=maxBytes"` -} - -// ContainerTemplate defines customized spec for a container -type ContainerTemplate struct { - Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,1,opt,name=resources"` + MaxBytes *string `json:"maxBytes,omitempty" protobuf:"bytes,17,opt,name=maxBytes"` + // Maximum number of subscriptions per channel, 0 means unlimited. Defaults to 1000 + MaxSubs *uint64 `json:"maxSubs,omitempty" protobuf:"bytes,18,opt,name=maxSubs"` + // Maximum number of bytes in a message payload, 0 means unlimited. Defaults to 1MB + MaxPayload *string `json:"maxPayload,omitempty" protobuf:"bytes,19,opt,name=maxPayload"` + // Specifies the time in follower state without a leader before attempting an election, i.e. "72h", “4h35m”. Defaults to 2s + RaftHeartbeatTimeout *string `json:"raftHeartbeatTimeout,omitempty" protobuf:"bytes,20,opt,name=raftHeartbeatTimeout"` + // Specifies the time in candidate state without a leader before attempting an election, i.e. "72h", “4h35m”. Defaults to 2s + RaftElectionTimeout *string `json:"raftElectionTimeout,omitempty" protobuf:"bytes,21,opt,name=raftElectionTimeout"` + // Specifies how long a leader waits without being able to contact a quorum of nodes before stepping down as leader, i.e. "72h", “4h35m”. Defaults to 1s + RaftLeaseTimeout *string `json:"raftLeaseTimeout,omitempty" protobuf:"bytes,22,opt,name=raftLeaseTimeout"` + // Specifies the time without an Apply() operation before sending a heartbeat to ensure timely commit, i.e. "72h", “4h35m”. 
Defaults to 100ms + RaftCommitTimeout *string `json:"raftCommitTimeout,omitempty" protobuf:"bytes,23,opt,name=raftCommitTimeout"` } // GetReplicas return the replicas of statefulset @@ -141,25 +108,6 @@ func (in *NativeStrategy) GetReplicas() int { return int(in.Replicas) } -// PersistenceStrategy defines the strategy of persistence -type PersistenceStrategy struct { - // Name of the StorageClass required by the claim. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 - // +optional - StorageClassName *string `json:"storageClassName,omitempty" protobuf:"bytes,1,opt,name=storageClassName"` - // Available access modes such as ReadWriteOnce, ReadWriteMany - // https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes - // +optional - AccessMode *corev1.PersistentVolumeAccessMode `json:"accessMode,omitempty" protobuf:"bytes,2,opt,name=accessMode,casttype=k8s.io/api/core/v1.PersistentVolumeAccessMode"` - // Volume size, e.g. 10Gi - VolumeSize *apiresource.Quantity `json:"volumeSize,omitempty" protobuf:"bytes,3,opt,name=volumeSize"` -} - -// BusConfig has the finalized configuration for EventBus -type BusConfig struct { - NATS *NATSConfig `json:"nats,omitempty" protobuf:"bytes,1,opt,name=nats"` -} - // NATSConfig holds the config of NATS type NATSConfig struct { // NATS streaming url @@ -173,42 +121,3 @@ type NATSConfig struct { // +optional AccessSecret *corev1.SecretKeySelector `json:"accessSecret,omitempty" protobuf:"bytes,4,opt,name=accessSecret"` } - -const ( - // EventBusConditionDeployed has the status True when the EventBus - // has its RestfulSet/Deployment ans service created. - EventBusConditionDeployed common.ConditionType = "Deployed" - // EventBusConditionConfigured has the status True when the EventBus - // has its configuration ready. - EventBusConditionConfigured common.ConditionType = "Configured" -) - -// InitConditions sets conditions to Unknown state. -func (s *EventBusStatus) InitConditions() { - s.InitializeConditions(EventBusConditionDeployed, EventBusConditionConfigured) -} - -// MarkDeployed set the bus has been deployed. -func (s *EventBusStatus) MarkDeployed(reason, message string) { - s.MarkTrueWithReason(EventBusConditionDeployed, reason, message) -} - -// MarkDeploying set the bus is deploying -func (s *EventBusStatus) MarkDeploying(reason, message string) { - s.MarkUnknown(EventBusConditionDeployed, reason, message) -} - -// MarkDeployFailed set the bus deploy failed -func (s *EventBusStatus) MarkDeployFailed(reason, message string) { - s.MarkFalse(EventBusConditionDeployed, reason, message) -} - -// MarkConfigured set the bus configuration has been done. -func (s *EventBusStatus) MarkConfigured() { - s.MarkTrue(EventBusConditionConfigured) -} - -// MarkNotConfigured set the bus status not configured. -func (s *EventBusStatus) MarkNotConfigured(reason, message string) { - s.MarkFalse(EventBusConditionConfigured, reason, message) -} diff --git a/pkg/apis/eventbus/v1alpha1/openapi_generated.go b/pkg/apis/eventbus/v1alpha1/openapi_generated.go index 28f73cc3d8..e2427fb6d4 100644 --- a/pkg/apis/eventbus/v1alpha1/openapi_generated.go +++ b/pkg/apis/eventbus/v1alpha1/openapi_generated.go @@ -1,7 +1,8 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -23,8 +24,8 @@ limitations under the License. 
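Editor's note: maxAge and the four raft*Timeout knobs introduced above (in both the proto and the Go types) take Go-style duration strings. A quick way to sanity-check a value before putting it in a manifest is time.ParseDuration; this is a runnable sketch independent of the diff, and the server-side parsing (done by the NATS streaming config) may differ in detail.

package main

import (
	"fmt"
	"time"
)

func main() {
	// The same literal format the maxAge and raft*Timeout comments document,
	// e.g. "72h" or "4h35m"; the defaults quoted in those comments also parse.
	for _, s := range []string{"72h", "4h35m", "2s", "1s", "100ms"} {
		d, err := time.ParseDuration(s)
		if err != nil {
			fmt.Printf("%s: %v\n", s, err)
			continue
		}
		fmt.Printf("%s parses to %v\n", s, d)
	}
}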
package v1alpha1 import ( - spec "github.com/go-openapi/spec" common "k8s.io/kube-openapi/pkg/common" + spec "k8s.io/kube-openapi/pkg/validation/spec" ) func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { @@ -35,6 +36,10 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.EventBusList": schema_pkg_apis_eventbus_v1alpha1_EventBusList(ref), "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.EventBusSpec": schema_pkg_apis_eventbus_v1alpha1_EventBusSpec(ref), "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.EventBusStatus": schema_pkg_apis_eventbus_v1alpha1_EventBusStatus(ref), + "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.JetStreamBus": schema_pkg_apis_eventbus_v1alpha1_JetStreamBus(ref), + "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.JetStreamConfig": schema_pkg_apis_eventbus_v1alpha1_JetStreamConfig(ref), + "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.KafkaBus": schema_pkg_apis_eventbus_v1alpha1_KafkaBus(ref), + "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.KafkaConsumerGroup": schema_pkg_apis_eventbus_v1alpha1_KafkaConsumerGroup(ref), "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.NATSBus": schema_pkg_apis_eventbus_v1alpha1_NATSBus(ref), "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.NATSConfig": schema_pkg_apis_eventbus_v1alpha1_NATSConfig(ref), "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.NativeStrategy": schema_pkg_apis_eventbus_v1alpha1_NativeStrategy(ref), @@ -54,11 +59,21 @@ func schema_pkg_apis_eventbus_v1alpha1_BusConfig(ref common.ReferenceCallback) c Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.NATSConfig"), }, }, + "jetstream": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.JetStreamConfig"), + }, + }, + "kafka": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.KafkaBus"), + }, + }, }, }, }, Dependencies: []string{ - "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.NATSConfig"}, + "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.JetStreamConfig", "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.KafkaBus", "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.NATSConfig"}, } } @@ -71,14 +86,26 @@ func schema_pkg_apis_eventbus_v1alpha1_ContainerTemplate(ref common.ReferenceCal Properties: map[string]spec.Schema{ "resources": { SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.ResourceRequirements"), + }, + }, + "imagePullPolicy": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "securityContext": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/api/core/v1.SecurityContext"), }, }, }, }, }, Dependencies: []string{ - "k8s.io/api/core/v1.ResourceRequirements"}, + "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecurityContext"}, } } @@ -105,17 +132,20 @@ func schema_pkg_apis_eventbus_v1alpha1_EventBus(ref common.ReferenceCallback) co }, "metadata": { SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), }, }, "spec": { SchemaProps: spec.SchemaProps{ - Ref: 
ref("github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.EventBusSpec"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.EventBusSpec"), }, }, "status": { SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.EventBusStatus"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.EventBusStatus"), }, }, }, @@ -150,7 +180,8 @@ func schema_pkg_apis_eventbus_v1alpha1_EventBusList(ref common.ReferenceCallback }, "metadata": { SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), }, }, "items": { @@ -159,7 +190,8 @@ func schema_pkg_apis_eventbus_v1alpha1_EventBusList(ref common.ReferenceCallback Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.EventBus"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.EventBus"), }, }, }, @@ -187,11 +219,28 @@ func schema_pkg_apis_eventbus_v1alpha1_EventBusSpec(ref common.ReferenceCallback Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.NATSBus"), }, }, + "jetstream": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.JetStreamBus"), + }, + }, + "kafka": { + SchemaProps: spec.SchemaProps{ + Description: "Kafka eventbus", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.KafkaBus"), + }, + }, + "jetstreamExotic": { + SchemaProps: spec.SchemaProps{ + Description: "Exotic JetStream", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.JetStreamConfig"), + }, + }, }, }, }, Dependencies: []string{ - "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.NATSBus"}, + "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.JetStreamBus", "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.JetStreamConfig", "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.KafkaBus", "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.NATSBus"}, } } @@ -215,7 +264,8 @@ func schema_pkg_apis_eventbus_v1alpha1_EventBusStatus(ref common.ReferenceCallba Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/common.Condition"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/common.Condition"), }, }, }, @@ -224,6 +274,7 @@ func schema_pkg_apis_eventbus_v1alpha1_EventBusStatus(ref common.ReferenceCallba "config": { SchemaProps: spec.SchemaProps{ Description: "Config holds the fininalized configuration of EventBus", + Default: map[string]interface{}{}, Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.BusConfig"), }, }, @@ -235,6 +286,303 @@ func schema_pkg_apis_eventbus_v1alpha1_EventBusStatus(ref common.ReferenceCallba } } +func schema_pkg_apis_eventbus_v1alpha1_JetStreamBus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "JetStreamBus holds the JetStream EventBus information", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "version": { + SchemaProps: spec.SchemaProps{ + Description: "JetStream version, such as 
\"2.7.3\"", + Type: []string{"string"}, + Format: "", + }, + }, + "replicas": { + SchemaProps: spec.SchemaProps{ + Description: "JetStream StatefulSet size", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "containerTemplate": { + SchemaProps: spec.SchemaProps{ + Description: "ContainerTemplate contains customized spec for Nats JetStream container", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.ContainerTemplate"), + }, + }, + "reloaderContainerTemplate": { + SchemaProps: spec.SchemaProps{ + Description: "ReloaderContainerTemplate contains customized spec for config reloader container", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.ContainerTemplate"), + }, + }, + "metricsContainerTemplate": { + SchemaProps: spec.SchemaProps{ + Description: "MetricsContainerTemplate contains customized spec for metrics container", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.ContainerTemplate"), + }, + }, + "persistence": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.PersistenceStrategy"), + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Metadata sets the pods's metadata, i.e. annotations and labels", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/common.Metadata"), + }, + }, + "nodeSelector": { + SchemaProps: spec.SchemaProps{ + Description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "tolerations": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, the pod's tolerations.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.Toleration"), + }, + }, + }, + }, + }, + "securityContext": { + SchemaProps: spec.SchemaProps{ + Description: "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.", + Ref: ref("k8s.io/api/core/v1.PodSecurityContext"), + }, + }, + "imagePullSecrets": { + VendorExtensible: spec.VendorExtensible{ + Extensions: spec.Extensions{ + "x-kubernetes-patch-merge-key": "name", + "x-kubernetes-patch-strategy": "merge", + }, + }, + SchemaProps: spec.SchemaProps{ + Description: "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + }, + }, + }, + }, + }, + "priorityClassName": { + SchemaProps: spec.SchemaProps{ + Description: "If specified, indicates the Redis pod's priority. 
\"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/", + Type: []string{"string"}, + Format: "", + }, + }, + "priority": { + SchemaProps: spec.SchemaProps{ + Description: "The priority value. Various system components use this field to find the priority of the Redis pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority. More info: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "affinity": { + SchemaProps: spec.SchemaProps{ + Description: "The pod's scheduling constraints More info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/", + Ref: ref("k8s.io/api/core/v1.Affinity"), + }, + }, + "serviceAccountName": { + SchemaProps: spec.SchemaProps{ + Description: "ServiceAccountName to apply to the StatefulSet", + Type: []string{"string"}, + Format: "", + }, + }, + "settings": { + SchemaProps: spec.SchemaProps{ + Description: "JetStream configuration, if not specified, global settings in controller-config will be used. See https://docs.nats.io/running-a-nats-service/configuration#jetstream. Only configure \"max_memory_store\" or \"max_file_store\", do not set \"store_dir\" as it has been hardcoded.", + Type: []string{"string"}, + Format: "", + }, + }, + "startArgs": { + SchemaProps: spec.SchemaProps{ + Description: "Optional arguments to start nats-server. For example, \"-D\" to enable debugging output, \"-DV\" to enable debugging and tracing. Check https://docs.nats.io/ for all the available arguments.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "streamConfig": { + SchemaProps: spec.SchemaProps{ + Description: "Optional configuration for the streams to be created in this JetStream service, if specified, it will be merged with the default configuration in controller-config. It accepts a YAML format configuration, available fields include, \"maxBytes\", \"maxMsgs\", \"maxAge\" (e.g. 72h), \"replicas\" (1, 3, 5), \"duplicates\" (e.g. 5m).", + Type: []string{"string"}, + Format: "", + }, + }, + "maxPayload": { + SchemaProps: spec.SchemaProps{ + Description: "Maximum number of bytes in a message payload, 0 means unlimited. 
Defaults to 1MB", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-events/pkg/apis/common.Metadata", "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.ContainerTemplate", "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.PersistenceStrategy", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.Toleration"}, + } +} + +func schema_pkg_apis_eventbus_v1alpha1_JetStreamConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "url": { + SchemaProps: spec.SchemaProps{ + Description: "JetStream (Nats) URL", + Type: []string{"string"}, + Format: "", + }, + }, + "accessSecret": { + SchemaProps: spec.SchemaProps{ + Description: "Secret for auth", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "streamConfig": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_eventbus_v1alpha1_KafkaBus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "KafkaBus holds the KafkaBus EventBus information", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "url": { + SchemaProps: spec.SchemaProps{ + Description: "URL to kafka cluster, multiple URLs separated by comma", + Type: []string{"string"}, + Format: "", + }, + }, + "topic": { + SchemaProps: spec.SchemaProps{ + Description: "Topic name, defaults to {namespace_name}-{eventbus_name}", + Type: []string{"string"}, + Format: "", + }, + }, + "version": { + SchemaProps: spec.SchemaProps{ + Description: "Kafka version, sarama defaults to the oldest supported stable version", + Type: []string{"string"}, + Format: "", + }, + }, + "tls": { + SchemaProps: spec.SchemaProps{ + Description: "TLS configuration for the kafka client.", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/common.TLSConfig"), + }, + }, + "sasl": { + SchemaProps: spec.SchemaProps{ + Description: "SASL configuration for the kafka client", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/common.SASLConfig"), + }, + }, + "consumerGroup": { + SchemaProps: spec.SchemaProps{ + Description: "Consumer group for kafka client", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.KafkaConsumerGroup"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-events/pkg/apis/common.SASLConfig", "github.com/argoproj/argo-events/pkg/apis/common.TLSConfig", "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.KafkaConsumerGroup"}, + } +} + +func schema_pkg_apis_eventbus_v1alpha1_KafkaConsumerGroup(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "groupName": { + SchemaProps: spec.SchemaProps{ + Description: "Consumer group name, defaults to {namespace_name}-{sensor_name}", + Type: []string{"string"}, + Format: "", + }, + }, + "rebalanceStrategy": { + SchemaProps: spec.SchemaProps{ + Description: "Rebalance strategy can be one of: sticky, roundrobin, range. 
Range is the default.", + Type: []string{"string"}, + Format: "", + }, + }, + "startOldest": { + SchemaProps: spec.SchemaProps{ + Description: "When starting up a new group do we want to start from the oldest event (true) or the newest event (false), defaults to false", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + }, + }, + } +} + func schema_pkg_apis_eventbus_v1alpha1_NATSBus(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -324,13 +672,6 @@ func schema_pkg_apis_eventbus_v1alpha1_NativeStrategy(ref common.ReferenceCallba Format: "", }, }, - "antiAffinity": { - SchemaProps: spec.SchemaProps{ - Description: "Deprecated, use Affinity instead, will be removed in v1.5", - Type: []string{"boolean"}, - Format: "", - }, - }, "persistence": { SchemaProps: spec.SchemaProps{ Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1.PersistenceStrategy"), @@ -356,8 +697,9 @@ func schema_pkg_apis_eventbus_v1alpha1_NativeStrategy(ref common.ReferenceCallba Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, @@ -370,7 +712,8 @@ func schema_pkg_apis_eventbus_v1alpha1_NativeStrategy(ref common.ReferenceCallba Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.Toleration"), + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.Toleration"), }, }, }, @@ -408,7 +751,8 @@ func schema_pkg_apis_eventbus_v1alpha1_NativeStrategy(ref common.ReferenceCallba Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), }, }, }, @@ -455,6 +799,48 @@ func schema_pkg_apis_eventbus_v1alpha1_NativeStrategy(ref common.ReferenceCallba Format: "", }, }, + "maxSubs": { + SchemaProps: spec.SchemaProps{ + Description: "Maximum number of subscriptions per channel, 0 means unlimited. Defaults to 1000", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "maxPayload": { + SchemaProps: spec.SchemaProps{ + Description: "Maximum number of bytes in a message payload, 0 means unlimited. Defaults to 1MB", + Type: []string{"string"}, + Format: "", + }, + }, + "raftHeartbeatTimeout": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies the time in follower state without a leader before attempting an election, i.e. \"72h\", “4h35m”. Defaults to 2s", + Type: []string{"string"}, + Format: "", + }, + }, + "raftElectionTimeout": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies the time in candidate state without a leader before attempting an election, i.e. \"72h\", “4h35m”. Defaults to 2s", + Type: []string{"string"}, + Format: "", + }, + }, + "raftLeaseTimeout": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies how long a leader waits without being able to contact a quorum of nodes before stepping down as leader, i.e. \"72h\", “4h35m”. Defaults to 1s", + Type: []string{"string"}, + Format: "", + }, + }, + "raftCommitTimeout": { + SchemaProps: spec.SchemaProps{ + Description: "Specifies the time without an Apply() operation before sending an heartbeat to ensure timely commit, i.e. \"72h\", “4h35m”. 
Defaults to 100ms", + Type: []string{"string"}, + Format: "", + }, + }, }, }, }, diff --git a/pkg/apis/eventbus/v1alpha1/persistence_strategy.go b/pkg/apis/eventbus/v1alpha1/persistence_strategy.go new file mode 100644 index 0000000000..dd48d497cf --- /dev/null +++ b/pkg/apis/eventbus/v1alpha1/persistence_strategy.go @@ -0,0 +1,20 @@ +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + apiresource "k8s.io/apimachinery/pkg/api/resource" +) + +// PersistenceStrategy defines the strategy of persistence +type PersistenceStrategy struct { + // Name of the StorageClass required by the claim. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + // +optional + StorageClassName *string `json:"storageClassName,omitempty" protobuf:"bytes,1,opt,name=storageClassName"` + // Available access modes such as ReadWriteOnce, ReadWriteMany + // https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes + // +optional + AccessMode *corev1.PersistentVolumeAccessMode `json:"accessMode,omitempty" protobuf:"bytes,2,opt,name=accessMode,casttype=k8s.io/api/core/v1.PersistentVolumeAccessMode"` + // Volume size, e.g. 10Gi + VolumeSize *apiresource.Quantity `json:"volumeSize,omitempty" protobuf:"bytes,3,opt,name=volumeSize"` +} diff --git a/pkg/apis/eventbus/v1alpha1/register.go b/pkg/apis/eventbus/v1alpha1/register.go index 1da99d696c..fdb43df36f 100644 --- a/pkg/apis/eventbus/v1alpha1/register.go +++ b/pkg/apis/eventbus/v1alpha1/register.go @@ -18,7 +18,9 @@ var ( SchemeGroupVersion = schema.GroupVersion{Group: eventbus.Group, Version: "v1alpha1"} // SchemaGroupVersionKind is a group version kind used to attach owner references - SchemaGroupVersionKind = schema.GroupVersionKind{Group: eventbus.Group, Version: "v1alpha1", Kind: eventbus.Kind} + SchemaGroupVersionKind = SchemeGroupVersion.WithKind(eventbus.Kind) + + SchemaGroupVersionResource = SchemeGroupVersion.WithResource(eventbus.Plural) // SchemeBuilder is used to add go types to the GroupVersionKind scheme SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) diff --git a/pkg/apis/eventbus/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/eventbus/v1alpha1/zz_generated.deepcopy.go index 416329edc1..c013460cb9 100644 --- a/pkg/apis/eventbus/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/eventbus/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,8 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
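Editor's note: the persistence_strategy.go file extracted above keeps PersistenceStrategy's VolumeSize as an apimachinery resource.Quantity. Below is a small sketch of populating the three fields in Go; it uses a local mirror of the type so it stays self-contained, needs the k8s.io/api and k8s.io/apimachinery modules the repository already depends on, and the concrete values ("standard", "10Gi") are made up for illustration.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	apiresource "k8s.io/apimachinery/pkg/api/resource"
)

// persistenceStrategy mirrors v1alpha1.PersistenceStrategy for illustration.
type persistenceStrategy struct {
	StorageClassName *string
	AccessMode       *corev1.PersistentVolumeAccessMode
	VolumeSize       *apiresource.Quantity
}

func main() {
	storageClass := "standard" // hypothetical StorageClass name
	mode := corev1.ReadWriteOnce
	size := apiresource.MustParse("10Gi") // "e.g. 10Gi", per the field comment
	p := persistenceStrategy{
		StorageClassName: &storageClass,
		AccessMode:       &mode,
		VolumeSize:       &size,
	}
	fmt.Println(*p.StorageClassName, *p.AccessMode, p.VolumeSize.String())
}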
@@ -34,6 +35,16 @@ func (in *BusConfig) DeepCopyInto(out *BusConfig) { *out = new(NATSConfig) (*in).DeepCopyInto(*out) } + if in.JetStream != nil { + in, out := &in.JetStream, &out.JetStream + *out = new(JetStreamConfig) + (*in).DeepCopyInto(*out) + } + if in.Kafka != nil { + in, out := &in.Kafka, &out.Kafka + *out = new(KafkaBus) + (*in).DeepCopyInto(*out) + } return } @@ -51,6 +62,11 @@ func (in *BusConfig) DeepCopy() *BusConfig { func (in *ContainerTemplate) DeepCopyInto(out *ContainerTemplate) { *out = *in in.Resources.DeepCopyInto(&out.Resources) + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(v1.SecurityContext) + (*in).DeepCopyInto(*out) + } return } @@ -133,6 +149,21 @@ func (in *EventBusSpec) DeepCopyInto(out *EventBusSpec) { *out = new(NATSBus) (*in).DeepCopyInto(*out) } + if in.JetStream != nil { + in, out := &in.JetStream, &out.JetStream + *out = new(JetStreamBus) + (*in).DeepCopyInto(*out) + } + if in.Kafka != nil { + in, out := &in.Kafka, &out.Kafka + *out = new(KafkaBus) + (*in).DeepCopyInto(*out) + } + if in.JetStreamExotic != nil { + in, out := &in.JetStreamExotic, &out.JetStreamExotic + *out = new(JetStreamConfig) + (*in).DeepCopyInto(*out) + } return } @@ -164,6 +195,174 @@ func (in *EventBusStatus) DeepCopy() *EventBusStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JetStreamBus) DeepCopyInto(out *JetStreamBus) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.ContainerTemplate != nil { + in, out := &in.ContainerTemplate, &out.ContainerTemplate + *out = new(ContainerTemplate) + (*in).DeepCopyInto(*out) + } + if in.ReloaderContainerTemplate != nil { + in, out := &in.ReloaderContainerTemplate, &out.ReloaderContainerTemplate + *out = new(ContainerTemplate) + (*in).DeepCopyInto(*out) + } + if in.MetricsContainerTemplate != nil { + in, out := &in.MetricsContainerTemplate, &out.MetricsContainerTemplate + *out = new(ContainerTemplate) + (*in).DeepCopyInto(*out) + } + if in.Persistence != nil { + in, out := &in.Persistence, &out.Persistence + *out = new(PersistenceStrategy) + (*in).DeepCopyInto(*out) + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = new(common.Metadata) + (*in).DeepCopyInto(*out) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(v1.PodSecurityContext) + (*in).DeepCopyInto(*out) + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]v1.LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(int32) + **out = **in + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(v1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = new(string) + **out = **in + } + if in.StartArgs != nil { + in, out := &in.StartArgs, &out.StartArgs + *out = make([]string, len(*in)) + copy(*out, *in) 
+ } + if in.StreamConfig != nil { + in, out := &in.StreamConfig, &out.StreamConfig + *out = new(string) + **out = **in + } + if in.MaxPayload != nil { + in, out := &in.MaxPayload, &out.MaxPayload + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JetStreamBus. +func (in *JetStreamBus) DeepCopy() *JetStreamBus { + if in == nil { + return nil + } + out := new(JetStreamBus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JetStreamConfig) DeepCopyInto(out *JetStreamConfig) { + *out = *in + if in.AccessSecret != nil { + in, out := &in.AccessSecret, &out.AccessSecret + *out = new(v1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JetStreamConfig. +func (in *JetStreamConfig) DeepCopy() *JetStreamConfig { + if in == nil { + return nil + } + out := new(JetStreamConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaBus) DeepCopyInto(out *KafkaBus) { + *out = *in + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(common.TLSConfig) + (*in).DeepCopyInto(*out) + } + if in.SASL != nil { + in, out := &in.SASL, &out.SASL + *out = new(common.SASLConfig) + (*in).DeepCopyInto(*out) + } + if in.ConsumerGroup != nil { + in, out := &in.ConsumerGroup, &out.ConsumerGroup + *out = new(KafkaConsumerGroup) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaBus. +func (in *KafkaBus) DeepCopy() *KafkaBus { + if in == nil { + return nil + } + out := new(KafkaBus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KafkaConsumerGroup) DeepCopyInto(out *KafkaConsumerGroup) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KafkaConsumerGroup. +func (in *KafkaConsumerGroup) DeepCopy() *KafkaConsumerGroup { + if in == nil { + return nil + } + out := new(KafkaConsumerGroup) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NATSBus) DeepCopyInto(out *NATSBus) { *out = *in @@ -298,6 +497,36 @@ func (in *NativeStrategy) DeepCopyInto(out *NativeStrategy) { *out = new(string) **out = **in } + if in.MaxSubs != nil { + in, out := &in.MaxSubs, &out.MaxSubs + *out = new(uint64) + **out = **in + } + if in.MaxPayload != nil { + in, out := &in.MaxPayload, &out.MaxPayload + *out = new(string) + **out = **in + } + if in.RaftHeartbeatTimeout != nil { + in, out := &in.RaftHeartbeatTimeout, &out.RaftHeartbeatTimeout + *out = new(string) + **out = **in + } + if in.RaftElectionTimeout != nil { + in, out := &in.RaftElectionTimeout, &out.RaftElectionTimeout + *out = new(string) + **out = **in + } + if in.RaftLeaseTimeout != nil { + in, out := &in.RaftLeaseTimeout, &out.RaftLeaseTimeout + *out = new(string) + **out = **in + } + if in.RaftCommitTimeout != nil { + in, out := &in.RaftCommitTimeout, &out.RaftCommitTimeout + *out = new(string) + **out = **in + } return } diff --git a/pkg/apis/events/event-data.go b/pkg/apis/events/event-data.go index b39a564d2b..6475d525b9 100644 --- a/pkg/apis/events/event-data.go +++ b/pkg/apis/events/event-data.go @@ -20,10 +20,10 @@ import ( "net/http" "time" + "github.com/minio/minio-go/v7/pkg/notification" "github.com/stripe/stripe-go" sqslib "github.com/aws/aws-sdk-go/service/sqs" - "github.com/minio/minio-go" ) // AMQPEventData represents the event data generated by AMQP eventsource. @@ -101,13 +101,67 @@ type AzureEventsHubEventData struct { Metadata map[string]string `json:"metadata,omitempty"` } +// AzureQueueStorageEventData represents the event data generated by AQS eventsource. +// +k8s:openapi-gen=true +type AzureQueueStorageEventData struct { + // Body is the message body + Body interface{} `json:"body"` + // MessageID is the ID of the message + MessageID string `json:"messageID"` + // InsertionTime is the time the Message was inserted into the Queue. + InsertionTime time.Time `json:"insertionTime"` + // Metadata holds the user defined metadata which will passed along the event payload. + Metadata map[string]string `json:"metadata,omitempty"` +} + +type AzureServiceBusEventData struct { + // ApplicationProperties can be used to store custom metadata for a message + ApplicationProperties map[string]interface{} `json:"applicationProperties"` + // Message body + Body interface{} `json:"body"` + // ContentType is the MIME content type + ContentType *string `json:"contentType"` + // CorrelationID is the correlation identifier + CorrelationID *string `json:"correlationID"` + // EnqueuedTime is the time when the message was enqueued + EnqueuedTime *time.Time `json:"enqueuedTime"` + // ID of the message + MessageID string `json:"messageID"` + // ReplyTo is an application-defined value specify a reply path to the receiver of the message + ReplyTo *string `json:"replyTo"` + // SequenceNumber is a unique number assigned to a message by Service Bus + SequenceNumber *int64 `json:"sequenceNumber"` + // Subject enables an application to indicate the purpose of the message, similar to an email subject line + Subject *string `json:"subject"` + // Metadata holds the user defined metadata which will passed along the event payload + Metadata map[string]string `json:"metadata,omitempty"` +} + +// BitbucketEventData represents the event data generated by the Bitbucket Server eventsource. +type BitbucketEventData struct { + // Headers from the Bitbucket Server http request. 
+ Headers http.Header `json:"headers"` + // Body represents the message body + Body *json.RawMessage `json:"body"` + // Metadata holds the user defined metadata which will passed along the event payload. + Metadata map[string]string `json:"metadata,omitempty"` +} + +// BitbucketServerEventData represents the event data generated by the Bitbucket Server eventsource. +type BitbucketServerEventData struct { + // Headers from the Bitbucket Server http request. + Headers http.Header `json:"headers"` + // Body represents the message body + Body *json.RawMessage `json:"body"` + // Metadata holds the user defined metadata which will passed along the event payload. + Metadata map[string]string `json:"metadata,omitempty"` +} + // CalendarEventData represents the event data generated by the Calendar eventsource. // +k8s:openapi-gen=true type CalendarEventData struct { // EventTime is time at which event occurred EventTime string `json:"eventTime"` - // Deprecated. - UserPayload json.RawMessage `json:"userPayload,omitempty"` // Metadata holds the user defined metadata which will passed along the event payload. Metadata map[string]string `json:"metadata,omitempty"` } @@ -137,14 +191,27 @@ type PubSubEventData struct { Metadata map[string]string `json:"metadata,omitempty"` } +// GerritEventData represents the event data generated by the Gerrit eventsource. +type GerritEventData struct { + // Headers from the Gerrit http request. + Headers http.Header `json:"headers"` + // Body represents the message body + Body *json.RawMessage `json:"body"` + // Metadata holds the user defined metadata which will passed along the event payload. + Metadata map[string]string `json:"metadata,omitempty"` +} + // GithubEventData represents the event data generated by the GitHub eventsource. type GithubEventData struct { - // Headers from the Gitlab http request. + // Headers from the GitHub http request. Headers http.Header `json:"headers"` // Body represents the message body Body *json.RawMessage `json:"body"` // Metadata holds the user defined metadata which will passed along the event payload. Metadata map[string]string `json:"metadata,omitempty"` + // Extras holds information that is added in case any of the EnrichPayload flags are on. + // This information will passed along the event payload. + Extras map[string]*json.RawMessage `json:"extras,omitempty"` } // GitLabEventData represents the event data generated by the GitLab eventsource. @@ -161,19 +228,23 @@ type GitLabEventData struct { type KafkaEventData struct { // Topic refers to the Kafka topic Topic string `json:"topic"` + // Key refers to the Kafka key + Key string `json:"key"` // Partition refers to the Kafka partition Partition int `json:"partition"` // Body refers to the message value Body interface{} `json:"body"` // Timestamp of the message Timestamp string `json:"timestamp"` + // Headers refers to the message headers + Headers map[string]string `json:"headers"` // Metadata holds the user defined metadata which will passed along the event payload. Metadata map[string]string `json:"metadata,omitempty"` } // MinioEventData represents the event data generated by the Minio eventsource. type MinioEventData struct { - Notification []minio.NotificationEvent `json:"notification"` + Notification []notification.Event `json:"notification"` // Metadata holds the user defined metadata which will passed along the event payload. 
Metadata map[string]string `json:"metadata,omitempty"` } @@ -196,6 +267,8 @@ type NATSEventData struct { Subject string `json:"subject"` // Message data. Body interface{} `json:"body"` + // Header represents the optional Header for a NATS message, based on the implementation of http.Header. + Header map[string][]string `json:"header,omitempty"` // Metadata holds the user defined metadata which will passed along the event payload. Metadata map[string]string `json:"metadata,omitempty"` } @@ -219,7 +292,19 @@ type RedisEventData struct { // Message pattern Pattern string `json:"pattern"` // Message body - Body string `json:"body"` + Body interface{} `json:"body"` + // Metadata holds the user defined metadata which will passed along the event payload. + Metadata map[string]string `json:"metadata,omitempty"` +} + +// RedisStreamEventData represents the event data generated by the Redis eventsource. +type RedisStreamEventData struct { + // Source redis stream name. + Stream string `json:"stream"` + // Message Id + Id string `json:"message_id"` + // Message body + Values map[string]interface{} `json:"values"` // Metadata holds the user defined metadata which will passed along the event payload. Metadata map[string]string `json:"metadata,omitempty"` } @@ -240,6 +325,8 @@ type ResourceEventData struct { Resource string `json:"resource"` // Metadata holds the user defined metadata which will passed along the event payload. Metadata map[string]string `json:"metadata,omitempty"` + // Cluster from which the event occurred + Cluster string `json:"cluster"` } // WebhookEventData represents the event data generated by the Webhook eventsource. diff --git a/pkg/apis/eventsource/v1alpha1/config.go b/pkg/apis/eventsource/v1alpha1/config.go index 07fb8101ff..e955742a8f 100644 --- a/pkg/apis/eventsource/v1alpha1/config.go +++ b/pkg/apis/eventsource/v1alpha1/config.go @@ -1,7 +1,7 @@ package v1alpha1 import ( - "errors" + fmt "fmt" "path" "regexp" ) @@ -18,19 +18,19 @@ type WatchPathConfig struct { // Validate validates WatchPathConfig func (c *WatchPathConfig) Validate() error { if c.Directory == "" { - return errors.New("directory is required") + return fmt.Errorf("directory is required") } if !path.IsAbs(c.Directory) { - return errors.New("directory must be an absolute file path") + return fmt.Errorf("directory must be an absolute file path") } if c.Path == "" && c.PathRegexp == "" { - return errors.New("either path or pathRegexp must be specified") + return fmt.Errorf("either path or pathRegexp must be specified") } if c.Path != "" && c.PathRegexp != "" { - return errors.New("path and pathRegexp cannot be specified together") + return fmt.Errorf("path and pathRegexp cannot be specified together") } if c.Path != "" && path.IsAbs(c.Path) { - return errors.New("path must be a relative file path") + return fmt.Errorf("path must be a relative file path") } if c.PathRegexp != "" { _, err := regexp.Compile(c.PathRegexp) diff --git a/pkg/apis/eventsource/v1alpha1/generated.pb.go b/pkg/apis/eventsource/v1alpha1/generated.pb.go index 8d7825f277..f93cf11654 100644 --- a/pkg/apis/eventsource/v1alpha1/generated.pb.go +++ b/pkg/apis/eventsource/v1alpha1/generated.pb.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
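Editor's note: the errors.New to fmt.Errorf swap in WatchPathConfig.Validate above changes no behavior for these fixed messages, since fmt.Errorf without verbs yields an equivalent error. The following runnable sketch reproduces the validation rules exactly as shown in the hunk, on a local mirror of the type so it compiles standalone.

package main

import (
	"fmt"
	"path"
	"regexp"
)

// watchPathConfig mirrors v1alpha1.WatchPathConfig for illustration.
type watchPathConfig struct {
	Directory  string
	Path       string
	PathRegexp string
}

// validate reproduces the rules of WatchPathConfig.Validate from the diff.
func (c *watchPathConfig) validate() error {
	if c.Directory == "" {
		return fmt.Errorf("directory is required")
	}
	if !path.IsAbs(c.Directory) {
		return fmt.Errorf("directory must be an absolute file path")
	}
	if c.Path == "" && c.PathRegexp == "" {
		return fmt.Errorf("either path or pathRegexp must be specified")
	}
	if c.Path != "" && c.PathRegexp != "" {
		return fmt.Errorf("path and pathRegexp cannot be specified together")
	}
	if c.Path != "" && path.IsAbs(c.Path) {
		return fmt.Errorf("path must be a relative file path")
	}
	if c.PathRegexp != "" {
		if _, err := regexp.Compile(c.PathRegexp); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	good := watchPathConfig{Directory: "/data", Path: "events.log"}
	bad := watchPathConfig{Directory: "data", Path: "events.log"}
	fmt.Println(good.validate()) // <nil>
	fmt.Println(bad.validate())  // directory must be an absolute file path
}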
@@ -214,10 +214,234 @@ func (m *AzureEventsHubEventSource) XXX_DiscardUnknown() { var xxx_messageInfo_AzureEventsHubEventSource proto.InternalMessageInfo +func (m *AzureQueueStorageEventSource) Reset() { *m = AzureQueueStorageEventSource{} } +func (*AzureQueueStorageEventSource) ProtoMessage() {} +func (*AzureQueueStorageEventSource) Descriptor() ([]byte, []int) { + return fileDescriptor_c9ac5d6cd016403b, []int{6} +} +func (m *AzureQueueStorageEventSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AzureQueueStorageEventSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AzureQueueStorageEventSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_AzureQueueStorageEventSource.Merge(m, src) +} +func (m *AzureQueueStorageEventSource) XXX_Size() int { + return m.Size() +} +func (m *AzureQueueStorageEventSource) XXX_DiscardUnknown() { + xxx_messageInfo_AzureQueueStorageEventSource.DiscardUnknown(m) +} + +var xxx_messageInfo_AzureQueueStorageEventSource proto.InternalMessageInfo + +func (m *AzureServiceBusEventSource) Reset() { *m = AzureServiceBusEventSource{} } +func (*AzureServiceBusEventSource) ProtoMessage() {} +func (*AzureServiceBusEventSource) Descriptor() ([]byte, []int) { + return fileDescriptor_c9ac5d6cd016403b, []int{7} +} +func (m *AzureServiceBusEventSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AzureServiceBusEventSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AzureServiceBusEventSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_AzureServiceBusEventSource.Merge(m, src) +} +func (m *AzureServiceBusEventSource) XXX_Size() int { + return m.Size() +} +func (m *AzureServiceBusEventSource) XXX_DiscardUnknown() { + xxx_messageInfo_AzureServiceBusEventSource.DiscardUnknown(m) +} + +var xxx_messageInfo_AzureServiceBusEventSource proto.InternalMessageInfo + +func (m *BitbucketAuth) Reset() { *m = BitbucketAuth{} } +func (*BitbucketAuth) ProtoMessage() {} +func (*BitbucketAuth) Descriptor() ([]byte, []int) { + return fileDescriptor_c9ac5d6cd016403b, []int{8} +} +func (m *BitbucketAuth) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BitbucketAuth) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BitbucketAuth) XXX_Merge(src proto.Message) { + xxx_messageInfo_BitbucketAuth.Merge(m, src) +} +func (m *BitbucketAuth) XXX_Size() int { + return m.Size() +} +func (m *BitbucketAuth) XXX_DiscardUnknown() { + xxx_messageInfo_BitbucketAuth.DiscardUnknown(m) +} + +var xxx_messageInfo_BitbucketAuth proto.InternalMessageInfo + +func (m *BitbucketBasicAuth) Reset() { *m = BitbucketBasicAuth{} } +func (*BitbucketBasicAuth) ProtoMessage() {} +func (*BitbucketBasicAuth) Descriptor() ([]byte, []int) { + return fileDescriptor_c9ac5d6cd016403b, []int{9} +} +func (m *BitbucketBasicAuth) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BitbucketBasicAuth) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BitbucketBasicAuth) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_BitbucketBasicAuth.Merge(m, src) +} +func (m *BitbucketBasicAuth) XXX_Size() int { + return m.Size() +} +func (m *BitbucketBasicAuth) XXX_DiscardUnknown() { + xxx_messageInfo_BitbucketBasicAuth.DiscardUnknown(m) +} + +var xxx_messageInfo_BitbucketBasicAuth proto.InternalMessageInfo + +func (m *BitbucketEventSource) Reset() { *m = BitbucketEventSource{} } +func (*BitbucketEventSource) ProtoMessage() {} +func (*BitbucketEventSource) Descriptor() ([]byte, []int) { + return fileDescriptor_c9ac5d6cd016403b, []int{10} +} +func (m *BitbucketEventSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BitbucketEventSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BitbucketEventSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_BitbucketEventSource.Merge(m, src) +} +func (m *BitbucketEventSource) XXX_Size() int { + return m.Size() +} +func (m *BitbucketEventSource) XXX_DiscardUnknown() { + xxx_messageInfo_BitbucketEventSource.DiscardUnknown(m) +} + +var xxx_messageInfo_BitbucketEventSource proto.InternalMessageInfo + +func (m *BitbucketRepository) Reset() { *m = BitbucketRepository{} } +func (*BitbucketRepository) ProtoMessage() {} +func (*BitbucketRepository) Descriptor() ([]byte, []int) { + return fileDescriptor_c9ac5d6cd016403b, []int{11} +} +func (m *BitbucketRepository) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BitbucketRepository) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BitbucketRepository) XXX_Merge(src proto.Message) { + xxx_messageInfo_BitbucketRepository.Merge(m, src) +} +func (m *BitbucketRepository) XXX_Size() int { + return m.Size() +} +func (m *BitbucketRepository) XXX_DiscardUnknown() { + xxx_messageInfo_BitbucketRepository.DiscardUnknown(m) +} + +var xxx_messageInfo_BitbucketRepository proto.InternalMessageInfo + +func (m *BitbucketServerEventSource) Reset() { *m = BitbucketServerEventSource{} } +func (*BitbucketServerEventSource) ProtoMessage() {} +func (*BitbucketServerEventSource) Descriptor() ([]byte, []int) { + return fileDescriptor_c9ac5d6cd016403b, []int{12} +} +func (m *BitbucketServerEventSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BitbucketServerEventSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BitbucketServerEventSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_BitbucketServerEventSource.Merge(m, src) +} +func (m *BitbucketServerEventSource) XXX_Size() int { + return m.Size() +} +func (m *BitbucketServerEventSource) XXX_DiscardUnknown() { + xxx_messageInfo_BitbucketServerEventSource.DiscardUnknown(m) +} + +var xxx_messageInfo_BitbucketServerEventSource proto.InternalMessageInfo + +func (m *BitbucketServerRepository) Reset() { *m = BitbucketServerRepository{} } +func (*BitbucketServerRepository) ProtoMessage() {} +func (*BitbucketServerRepository) Descriptor() ([]byte, []int) { + return fileDescriptor_c9ac5d6cd016403b, []int{13} +} +func (m *BitbucketServerRepository) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BitbucketServerRepository) XXX_Marshal(b []byte, deterministic bool) 
([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BitbucketServerRepository) XXX_Merge(src proto.Message) { + xxx_messageInfo_BitbucketServerRepository.Merge(m, src) +} +func (m *BitbucketServerRepository) XXX_Size() int { + return m.Size() +} +func (m *BitbucketServerRepository) XXX_DiscardUnknown() { + xxx_messageInfo_BitbucketServerRepository.DiscardUnknown(m) +} + +var xxx_messageInfo_BitbucketServerRepository proto.InternalMessageInfo + func (m *CalendarEventSource) Reset() { *m = CalendarEventSource{} } func (*CalendarEventSource) ProtoMessage() {} func (*CalendarEventSource) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{6} + return fileDescriptor_c9ac5d6cd016403b, []int{14} } func (m *CalendarEventSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -245,7 +469,7 @@ var xxx_messageInfo_CalendarEventSource proto.InternalMessageInfo func (m *CatchupConfiguration) Reset() { *m = CatchupConfiguration{} } func (*CatchupConfiguration) ProtoMessage() {} func (*CatchupConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{7} + return fileDescriptor_c9ac5d6cd016403b, []int{15} } func (m *CatchupConfiguration) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -273,7 +497,7 @@ var xxx_messageInfo_CatchupConfiguration proto.InternalMessageInfo func (m *ConfigMapPersistence) Reset() { *m = ConfigMapPersistence{} } func (*ConfigMapPersistence) ProtoMessage() {} func (*ConfigMapPersistence) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{8} + return fileDescriptor_c9ac5d6cd016403b, []int{16} } func (m *ConfigMapPersistence) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -301,7 +525,7 @@ var xxx_messageInfo_ConfigMapPersistence proto.InternalMessageInfo func (m *EmitterEventSource) Reset() { *m = EmitterEventSource{} } func (*EmitterEventSource) ProtoMessage() {} func (*EmitterEventSource) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{9} + return fileDescriptor_c9ac5d6cd016403b, []int{17} } func (m *EmitterEventSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -329,7 +553,7 @@ var xxx_messageInfo_EmitterEventSource proto.InternalMessageInfo func (m *EventPersistence) Reset() { *m = EventPersistence{} } func (*EventPersistence) ProtoMessage() {} func (*EventPersistence) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{10} + return fileDescriptor_c9ac5d6cd016403b, []int{18} } func (m *EventPersistence) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -357,7 +581,7 @@ var xxx_messageInfo_EventPersistence proto.InternalMessageInfo func (m *EventSource) Reset() { *m = EventSource{} } func (*EventSource) ProtoMessage() {} func (*EventSource) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{11} + return fileDescriptor_c9ac5d6cd016403b, []int{19} } func (m *EventSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -382,10 +606,38 @@ func (m *EventSource) XXX_DiscardUnknown() { var xxx_messageInfo_EventSource proto.InternalMessageInfo +func (m *EventSourceFilter) Reset() { *m = EventSourceFilter{} } +func (*EventSourceFilter) ProtoMessage() {} +func (*EventSourceFilter) Descriptor() ([]byte, []int) { + return fileDescriptor_c9ac5d6cd016403b, []int{20} +} +func (m *EventSourceFilter) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*EventSourceFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EventSourceFilter) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventSourceFilter.Merge(m, src) +} +func (m *EventSourceFilter) XXX_Size() int { + return m.Size() +} +func (m *EventSourceFilter) XXX_DiscardUnknown() { + xxx_messageInfo_EventSourceFilter.DiscardUnknown(m) +} + +var xxx_messageInfo_EventSourceFilter proto.InternalMessageInfo + func (m *EventSourceList) Reset() { *m = EventSourceList{} } func (*EventSourceList) ProtoMessage() {} func (*EventSourceList) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{12} + return fileDescriptor_c9ac5d6cd016403b, []int{21} } func (m *EventSourceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -413,7 +665,7 @@ var xxx_messageInfo_EventSourceList proto.InternalMessageInfo func (m *EventSourceSpec) Reset() { *m = EventSourceSpec{} } func (*EventSourceSpec) ProtoMessage() {} func (*EventSourceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{13} + return fileDescriptor_c9ac5d6cd016403b, []int{22} } func (m *EventSourceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -441,7 +693,7 @@ var xxx_messageInfo_EventSourceSpec proto.InternalMessageInfo func (m *EventSourceStatus) Reset() { *m = EventSourceStatus{} } func (*EventSourceStatus) ProtoMessage() {} func (*EventSourceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{14} + return fileDescriptor_c9ac5d6cd016403b, []int{23} } func (m *EventSourceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -469,7 +721,7 @@ var xxx_messageInfo_EventSourceStatus proto.InternalMessageInfo func (m *FileEventSource) Reset() { *m = FileEventSource{} } func (*FileEventSource) ProtoMessage() {} func (*FileEventSource) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{15} + return fileDescriptor_c9ac5d6cd016403b, []int{24} } func (m *FileEventSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -497,7 +749,7 @@ var xxx_messageInfo_FileEventSource proto.InternalMessageInfo func (m *GenericEventSource) Reset() { *m = GenericEventSource{} } func (*GenericEventSource) ProtoMessage() {} func (*GenericEventSource) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{16} + return fileDescriptor_c9ac5d6cd016403b, []int{25} } func (m *GenericEventSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -522,10 +774,66 @@ func (m *GenericEventSource) XXX_DiscardUnknown() { var xxx_messageInfo_GenericEventSource proto.InternalMessageInfo +func (m *GerritEventSource) Reset() { *m = GerritEventSource{} } +func (*GerritEventSource) ProtoMessage() {} +func (*GerritEventSource) Descriptor() ([]byte, []int) { + return fileDescriptor_c9ac5d6cd016403b, []int{26} +} +func (m *GerritEventSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GerritEventSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GerritEventSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_GerritEventSource.Merge(m, src) +} +func (m *GerritEventSource) XXX_Size() int { + return m.Size() +} +func (m *GerritEventSource) XXX_DiscardUnknown() { + 
xxx_messageInfo_GerritEventSource.DiscardUnknown(m) +} + +var xxx_messageInfo_GerritEventSource proto.InternalMessageInfo + +func (m *GithubAppCreds) Reset() { *m = GithubAppCreds{} } +func (*GithubAppCreds) ProtoMessage() {} +func (*GithubAppCreds) Descriptor() ([]byte, []int) { + return fileDescriptor_c9ac5d6cd016403b, []int{27} +} +func (m *GithubAppCreds) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GithubAppCreds) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GithubAppCreds) XXX_Merge(src proto.Message) { + xxx_messageInfo_GithubAppCreds.Merge(m, src) +} +func (m *GithubAppCreds) XXX_Size() int { + return m.Size() +} +func (m *GithubAppCreds) XXX_DiscardUnknown() { + xxx_messageInfo_GithubAppCreds.DiscardUnknown(m) +} + +var xxx_messageInfo_GithubAppCreds proto.InternalMessageInfo + func (m *GithubEventSource) Reset() { *m = GithubEventSource{} } func (*GithubEventSource) ProtoMessage() {} func (*GithubEventSource) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{17} + return fileDescriptor_c9ac5d6cd016403b, []int{28} } func (m *GithubEventSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -553,7 +861,7 @@ var xxx_messageInfo_GithubEventSource proto.InternalMessageInfo func (m *GitlabEventSource) Reset() { *m = GitlabEventSource{} } func (*GitlabEventSource) ProtoMessage() {} func (*GitlabEventSource) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{18} + return fileDescriptor_c9ac5d6cd016403b, []int{29} } func (m *GitlabEventSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -581,7 +889,7 @@ var xxx_messageInfo_GitlabEventSource proto.InternalMessageInfo func (m *HDFSEventSource) Reset() { *m = HDFSEventSource{} } func (*HDFSEventSource) ProtoMessage() {} func (*HDFSEventSource) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{19} + return fileDescriptor_c9ac5d6cd016403b, []int{30} } func (m *HDFSEventSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -609,7 +917,7 @@ var xxx_messageInfo_HDFSEventSource proto.InternalMessageInfo func (m *KafkaConsumerGroup) Reset() { *m = KafkaConsumerGroup{} } func (*KafkaConsumerGroup) ProtoMessage() {} func (*KafkaConsumerGroup) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{20} + return fileDescriptor_c9ac5d6cd016403b, []int{31} } func (m *KafkaConsumerGroup) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -637,7 +945,7 @@ var xxx_messageInfo_KafkaConsumerGroup proto.InternalMessageInfo func (m *KafkaEventSource) Reset() { *m = KafkaEventSource{} } func (*KafkaEventSource) ProtoMessage() {} func (*KafkaEventSource) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{21} + return fileDescriptor_c9ac5d6cd016403b, []int{32} } func (m *KafkaEventSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -665,7 +973,7 @@ var xxx_messageInfo_KafkaEventSource proto.InternalMessageInfo func (m *MQTTEventSource) Reset() { *m = MQTTEventSource{} } func (*MQTTEventSource) ProtoMessage() {} func (*MQTTEventSource) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{22} + return fileDescriptor_c9ac5d6cd016403b, []int{33} } func (m *MQTTEventSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -693,7 +1001,7 @@ var xxx_messageInfo_MQTTEventSource 
proto.InternalMessageInfo func (m *NATSAuth) Reset() { *m = NATSAuth{} } func (*NATSAuth) ProtoMessage() {} func (*NATSAuth) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{23} + return fileDescriptor_c9ac5d6cd016403b, []int{34} } func (m *NATSAuth) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -721,7 +1029,7 @@ var xxx_messageInfo_NATSAuth proto.InternalMessageInfo func (m *NATSEventsSource) Reset() { *m = NATSEventsSource{} } func (*NATSEventsSource) ProtoMessage() {} func (*NATSEventsSource) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{24} + return fileDescriptor_c9ac5d6cd016403b, []int{35} } func (m *NATSEventsSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -749,7 +1057,7 @@ var xxx_messageInfo_NATSEventsSource proto.InternalMessageInfo func (m *NSQEventSource) Reset() { *m = NSQEventSource{} } func (*NSQEventSource) ProtoMessage() {} func (*NSQEventSource) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{25} + return fileDescriptor_c9ac5d6cd016403b, []int{36} } func (m *NSQEventSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -777,7 +1085,7 @@ var xxx_messageInfo_NSQEventSource proto.InternalMessageInfo func (m *OwnedRepositories) Reset() { *m = OwnedRepositories{} } func (*OwnedRepositories) ProtoMessage() {} func (*OwnedRepositories) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{26} + return fileDescriptor_c9ac5d6cd016403b, []int{37} } func (m *OwnedRepositories) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -802,10 +1110,38 @@ func (m *OwnedRepositories) XXX_DiscardUnknown() { var xxx_messageInfo_OwnedRepositories proto.InternalMessageInfo +func (m *PayloadEnrichmentFlags) Reset() { *m = PayloadEnrichmentFlags{} } +func (*PayloadEnrichmentFlags) ProtoMessage() {} +func (*PayloadEnrichmentFlags) Descriptor() ([]byte, []int) { + return fileDescriptor_c9ac5d6cd016403b, []int{38} +} +func (m *PayloadEnrichmentFlags) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PayloadEnrichmentFlags) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PayloadEnrichmentFlags) XXX_Merge(src proto.Message) { + xxx_messageInfo_PayloadEnrichmentFlags.Merge(m, src) +} +func (m *PayloadEnrichmentFlags) XXX_Size() int { + return m.Size() +} +func (m *PayloadEnrichmentFlags) XXX_DiscardUnknown() { + xxx_messageInfo_PayloadEnrichmentFlags.DiscardUnknown(m) +} + +var xxx_messageInfo_PayloadEnrichmentFlags proto.InternalMessageInfo + func (m *PubSubEventSource) Reset() { *m = PubSubEventSource{} } func (*PubSubEventSource) ProtoMessage() {} func (*PubSubEventSource) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{27} + return fileDescriptor_c9ac5d6cd016403b, []int{39} } func (m *PubSubEventSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -833,7 +1169,7 @@ var xxx_messageInfo_PubSubEventSource proto.InternalMessageInfo func (m *PulsarEventSource) Reset() { *m = PulsarEventSource{} } func (*PulsarEventSource) ProtoMessage() {} func (*PulsarEventSource) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{28} + return fileDescriptor_c9ac5d6cd016403b, []int{40} } func (m *PulsarEventSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -861,7 +1197,7 @@ var xxx_messageInfo_PulsarEventSource 
proto.InternalMessageInfo func (m *RedisEventSource) Reset() { *m = RedisEventSource{} } func (*RedisEventSource) ProtoMessage() {} func (*RedisEventSource) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{29} + return fileDescriptor_c9ac5d6cd016403b, []int{41} } func (m *RedisEventSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -886,10 +1222,38 @@ func (m *RedisEventSource) XXX_DiscardUnknown() { var xxx_messageInfo_RedisEventSource proto.InternalMessageInfo -func (m *ResourceEventSource) Reset() { *m = ResourceEventSource{} } -func (*ResourceEventSource) ProtoMessage() {} -func (*ResourceEventSource) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{30} +func (m *RedisStreamEventSource) Reset() { *m = RedisStreamEventSource{} } +func (*RedisStreamEventSource) ProtoMessage() {} +func (*RedisStreamEventSource) Descriptor() ([]byte, []int) { + return fileDescriptor_c9ac5d6cd016403b, []int{42} +} +func (m *RedisStreamEventSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RedisStreamEventSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RedisStreamEventSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_RedisStreamEventSource.Merge(m, src) +} +func (m *RedisStreamEventSource) XXX_Size() int { + return m.Size() +} +func (m *RedisStreamEventSource) XXX_DiscardUnknown() { + xxx_messageInfo_RedisStreamEventSource.DiscardUnknown(m) +} + +var xxx_messageInfo_RedisStreamEventSource proto.InternalMessageInfo + +func (m *ResourceEventSource) Reset() { *m = ResourceEventSource{} } +func (*ResourceEventSource) ProtoMessage() {} +func (*ResourceEventSource) Descriptor() ([]byte, []int) { + return fileDescriptor_c9ac5d6cd016403b, []int{43} } func (m *ResourceEventSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -917,7 +1281,7 @@ var xxx_messageInfo_ResourceEventSource proto.InternalMessageInfo func (m *ResourceFilter) Reset() { *m = ResourceFilter{} } func (*ResourceFilter) ProtoMessage() {} func (*ResourceFilter) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{31} + return fileDescriptor_c9ac5d6cd016403b, []int{44} } func (m *ResourceFilter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -942,10 +1306,38 @@ func (m *ResourceFilter) XXX_DiscardUnknown() { var xxx_messageInfo_ResourceFilter proto.InternalMessageInfo +func (m *SFTPEventSource) Reset() { *m = SFTPEventSource{} } +func (*SFTPEventSource) ProtoMessage() {} +func (*SFTPEventSource) Descriptor() ([]byte, []int) { + return fileDescriptor_c9ac5d6cd016403b, []int{45} +} +func (m *SFTPEventSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SFTPEventSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SFTPEventSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_SFTPEventSource.Merge(m, src) +} +func (m *SFTPEventSource) XXX_Size() int { + return m.Size() +} +func (m *SFTPEventSource) XXX_DiscardUnknown() { + xxx_messageInfo_SFTPEventSource.DiscardUnknown(m) +} + +var xxx_messageInfo_SFTPEventSource proto.InternalMessageInfo + func (m *SNSEventSource) Reset() { *m = SNSEventSource{} } func (*SNSEventSource) ProtoMessage() {} func (*SNSEventSource) Descriptor() ([]byte, []int) 
{ - return fileDescriptor_c9ac5d6cd016403b, []int{32} + return fileDescriptor_c9ac5d6cd016403b, []int{46} } func (m *SNSEventSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -973,7 +1365,7 @@ var xxx_messageInfo_SNSEventSource proto.InternalMessageInfo func (m *SQSEventSource) Reset() { *m = SQSEventSource{} } func (*SQSEventSource) ProtoMessage() {} func (*SQSEventSource) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{33} + return fileDescriptor_c9ac5d6cd016403b, []int{47} } func (m *SQSEventSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1001,7 +1393,7 @@ var xxx_messageInfo_SQSEventSource proto.InternalMessageInfo func (m *Selector) Reset() { *m = Selector{} } func (*Selector) ProtoMessage() {} func (*Selector) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{34} + return fileDescriptor_c9ac5d6cd016403b, []int{48} } func (m *Selector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1029,7 +1421,7 @@ var xxx_messageInfo_Selector proto.InternalMessageInfo func (m *Service) Reset() { *m = Service{} } func (*Service) ProtoMessage() {} func (*Service) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{35} + return fileDescriptor_c9ac5d6cd016403b, []int{49} } func (m *Service) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1057,7 +1449,7 @@ var xxx_messageInfo_Service proto.InternalMessageInfo func (m *SlackEventSource) Reset() { *m = SlackEventSource{} } func (*SlackEventSource) ProtoMessage() {} func (*SlackEventSource) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{36} + return fileDescriptor_c9ac5d6cd016403b, []int{50} } func (m *SlackEventSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1085,7 +1477,7 @@ var xxx_messageInfo_SlackEventSource proto.InternalMessageInfo func (m *StorageGridEventSource) Reset() { *m = StorageGridEventSource{} } func (*StorageGridEventSource) ProtoMessage() {} func (*StorageGridEventSource) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{37} + return fileDescriptor_c9ac5d6cd016403b, []int{51} } func (m *StorageGridEventSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1113,7 +1505,7 @@ var xxx_messageInfo_StorageGridEventSource proto.InternalMessageInfo func (m *StorageGridFilter) Reset() { *m = StorageGridFilter{} } func (*StorageGridFilter) ProtoMessage() {} func (*StorageGridFilter) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{38} + return fileDescriptor_c9ac5d6cd016403b, []int{52} } func (m *StorageGridFilter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1141,7 +1533,7 @@ var xxx_messageInfo_StorageGridFilter proto.InternalMessageInfo func (m *StripeEventSource) Reset() { *m = StripeEventSource{} } func (*StripeEventSource) ProtoMessage() {} func (*StripeEventSource) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{39} + return fileDescriptor_c9ac5d6cd016403b, []int{53} } func (m *StripeEventSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1169,7 +1561,7 @@ var xxx_messageInfo_StripeEventSource proto.InternalMessageInfo func (m *Template) Reset() { *m = Template{} } func (*Template) ProtoMessage() {} func (*Template) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{40} + return fileDescriptor_c9ac5d6cd016403b, []int{54} } func (m *Template) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ 
-1197,7 +1589,7 @@ var xxx_messageInfo_Template proto.InternalMessageInfo func (m *WatchPathConfig) Reset() { *m = WatchPathConfig{} } func (*WatchPathConfig) ProtoMessage() {} func (*WatchPathConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{41} + return fileDescriptor_c9ac5d6cd016403b, []int{55} } func (m *WatchPathConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1225,7 +1617,7 @@ var xxx_messageInfo_WatchPathConfig proto.InternalMessageInfo func (m *WebhookContext) Reset() { *m = WebhookContext{} } func (*WebhookContext) ProtoMessage() {} func (*WebhookContext) Descriptor() ([]byte, []int) { - return fileDescriptor_c9ac5d6cd016403b, []int{42} + return fileDescriptor_c9ac5d6cd016403b, []int{56} } func (m *WebhookContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1250,6 +1642,34 @@ func (m *WebhookContext) XXX_DiscardUnknown() { var xxx_messageInfo_WebhookContext proto.InternalMessageInfo +func (m *WebhookEventSource) Reset() { *m = WebhookEventSource{} } +func (*WebhookEventSource) ProtoMessage() {} +func (*WebhookEventSource) Descriptor() ([]byte, []int) { + return fileDescriptor_c9ac5d6cd016403b, []int{57} +} +func (m *WebhookEventSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WebhookEventSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WebhookEventSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_WebhookEventSource.Merge(m, src) +} +func (m *WebhookEventSource) XXX_Size() int { + return m.Size() +} +func (m *WebhookEventSource) XXX_DiscardUnknown() { + xxx_messageInfo_WebhookEventSource.DiscardUnknown(m) +} + +var xxx_messageInfo_WebhookEventSource proto.InternalMessageInfo + func init() { proto.RegisterType((*AMQPConsumeConfig)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.AMQPConsumeConfig") proto.RegisterType((*AMQPEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.AMQPEventSource") @@ -1259,6 +1679,18 @@ func init() { proto.RegisterType((*AMQPQueueDeclareConfig)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.AMQPQueueDeclareConfig") proto.RegisterType((*AzureEventsHubEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.AzureEventsHubEventSource") proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.AzureEventsHubEventSource.MetadataEntry") + proto.RegisterType((*AzureQueueStorageEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.AzureQueueStorageEventSource") + proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.AzureQueueStorageEventSource.MetadataEntry") + proto.RegisterType((*AzureServiceBusEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.AzureServiceBusEventSource") + proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.AzureServiceBusEventSource.MetadataEntry") + proto.RegisterType((*BitbucketAuth)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.BitbucketAuth") + proto.RegisterType((*BitbucketBasicAuth)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.BitbucketBasicAuth") + proto.RegisterType((*BitbucketEventSource)(nil), 
"github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.BitbucketEventSource") + proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.BitbucketEventSource.MetadataEntry") + proto.RegisterType((*BitbucketRepository)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.BitbucketRepository") + proto.RegisterType((*BitbucketServerEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.BitbucketServerEventSource") + proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.BitbucketServerEventSource.MetadataEntry") + proto.RegisterType((*BitbucketServerRepository)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.BitbucketServerRepository") proto.RegisterType((*CalendarEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.CalendarEventSource") proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.CalendarEventSource.MetadataEntry") proto.RegisterType((*CatchupConfiguration)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.CatchupConfiguration") @@ -1267,14 +1699,20 @@ func init() { proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.EmitterEventSource.MetadataEntry") proto.RegisterType((*EventPersistence)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.EventPersistence") proto.RegisterType((*EventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.EventSource") + proto.RegisterType((*EventSourceFilter)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.EventSourceFilter") proto.RegisterType((*EventSourceList)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.EventSourceList") proto.RegisterType((*EventSourceSpec)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.EventSourceSpec") proto.RegisterMapType((map[string]AMQPEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.EventSourceSpec.AmqpEntry") proto.RegisterMapType((map[string]AzureEventsHubEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.EventSourceSpec.AzureEventsHubEntry") + proto.RegisterMapType((map[string]AzureQueueStorageEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.EventSourceSpec.AzureQueueStorageEntry") + proto.RegisterMapType((map[string]AzureServiceBusEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.EventSourceSpec.AzureServiceBusEntry") + proto.RegisterMapType((map[string]BitbucketEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.EventSourceSpec.BitbucketEntry") + proto.RegisterMapType((map[string]BitbucketServerEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.EventSourceSpec.BitbucketserverEntry") proto.RegisterMapType((map[string]CalendarEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.EventSourceSpec.CalendarEntry") proto.RegisterMapType((map[string]EmitterEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.EventSourceSpec.EmitterEntry") proto.RegisterMapType((map[string]FileEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.EventSourceSpec.FileEntry") 
proto.RegisterMapType((map[string]GenericEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.EventSourceSpec.GenericEntry") + proto.RegisterMapType((map[string]GerritEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.EventSourceSpec.GerritEntry") proto.RegisterMapType((map[string]GithubEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.EventSourceSpec.GithubEntry") proto.RegisterMapType((map[string]GitlabEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.EventSourceSpec.GitlabEntry") proto.RegisterMapType((map[string]HDFSEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.EventSourceSpec.HdfsEntry") @@ -1286,18 +1724,23 @@ func init() { proto.RegisterMapType((map[string]PubSubEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.EventSourceSpec.PubSubEntry") proto.RegisterMapType((map[string]PulsarEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.EventSourceSpec.PulsarEntry") proto.RegisterMapType((map[string]RedisEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.EventSourceSpec.RedisEntry") + proto.RegisterMapType((map[string]RedisStreamEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.EventSourceSpec.RedisStreamEntry") proto.RegisterMapType((map[string]ResourceEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.EventSourceSpec.ResourceEntry") + proto.RegisterMapType((map[string]SFTPEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.EventSourceSpec.SftpEntry") proto.RegisterMapType((map[string]SlackEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.EventSourceSpec.SlackEntry") proto.RegisterMapType((map[string]SNSEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.EventSourceSpec.SnsEntry") proto.RegisterMapType((map[string]SQSEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.EventSourceSpec.SqsEntry") proto.RegisterMapType((map[string]StorageGridEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.EventSourceSpec.StorageGridEntry") proto.RegisterMapType((map[string]StripeEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.EventSourceSpec.StripeEntry") - proto.RegisterMapType((map[string]WebhookContext)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.EventSourceSpec.WebhookEntry") + proto.RegisterMapType((map[string]WebhookEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.EventSourceSpec.WebhookEntry") proto.RegisterType((*EventSourceStatus)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.EventSourceStatus") proto.RegisterType((*FileEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.FileEventSource") proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.FileEventSource.MetadataEntry") proto.RegisterType((*GenericEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.GenericEventSource") proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.GenericEventSource.MetadataEntry") + proto.RegisterType((*GerritEventSource)(nil), 
"github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.GerritEventSource") + proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.GerritEventSource.MetadataEntry") + proto.RegisterType((*GithubAppCreds)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.GithubAppCreds") proto.RegisterType((*GithubEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.GithubEventSource") proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.GithubEventSource.MetadataEntry") proto.RegisterType((*GitlabEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.GitlabEventSource") @@ -1315,15 +1758,21 @@ func init() { proto.RegisterType((*NSQEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.NSQEventSource") proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.NSQEventSource.MetadataEntry") proto.RegisterType((*OwnedRepositories)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.OwnedRepositories") + proto.RegisterType((*PayloadEnrichmentFlags)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.PayloadEnrichmentFlags") proto.RegisterType((*PubSubEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.PubSubEventSource") proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.PubSubEventSource.MetadataEntry") proto.RegisterType((*PulsarEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.PulsarEventSource") + proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.PulsarEventSource.AuthAthenzParamsEntry") proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.PulsarEventSource.MetadataEntry") proto.RegisterType((*RedisEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.RedisEventSource") proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.RedisEventSource.MetadataEntry") + proto.RegisterType((*RedisStreamEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.RedisStreamEventSource") + proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.RedisStreamEventSource.MetadataEntry") proto.RegisterType((*ResourceEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.ResourceEventSource") proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.ResourceEventSource.MetadataEntry") proto.RegisterType((*ResourceFilter)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.ResourceFilter") + proto.RegisterType((*SFTPEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.SFTPEventSource") + proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.SFTPEventSource.MetadataEntry") proto.RegisterType((*SNSEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.SNSEventSource") proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.SNSEventSource.MetadataEntry") 
proto.RegisterType((*SQSEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.SQSEventSource") @@ -1342,6 +1791,7 @@ func init() { proto.RegisterType((*WatchPathConfig)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.WatchPathConfig") proto.RegisterType((*WebhookContext)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.WebhookContext") proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.WebhookContext.MetadataEntry") + proto.RegisterType((*WebhookEventSource)(nil), "github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1.WebhookEventSource") } func init() { @@ -1349,362 +1799,472 @@ func init() { } var fileDescriptor_c9ac5d6cd016403b = []byte{ - // 5675 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x3d, 0x4b, 0x6c, 0x24, 0xc7, - 0x75, 0x1a, 0xce, 0x90, 0x9c, 0x79, 0xfc, 0xd7, 0xae, 0xa4, 0x11, 0x2d, 0x2d, 0x37, 0x14, 0xb2, - 0xd8, 0x8d, 0x25, 0x32, 0xda, 0x24, 0x8e, 0x22, 0xc5, 0x32, 0x66, 0x48, 0x2e, 0x97, 0xe2, 0x67, - 0xc9, 0x37, 0xe4, 0xae, 0x64, 0xfd, 0xdc, 0xd3, 0x53, 0x9c, 0x69, 0xb1, 0xa7, 0x7b, 0xd8, 0xdd, - 0xc3, 0x5d, 0x0a, 0x88, 0x2d, 0x04, 0xc8, 0xc7, 0x56, 0xe4, 0x58, 0x89, 0xf3, 0x73, 0x6e, 0xbe, - 0x18, 0x48, 0x8e, 0x01, 0x72, 0xc9, 0x29, 0x37, 0x01, 0xbe, 0x28, 0x37, 0x03, 0x06, 0x16, 0x16, - 0x93, 0x6b, 0x2e, 0xc9, 0x29, 0x0e, 0x0c, 0x04, 0xf5, 0xe9, 0xea, 0xea, 0x9e, 0x26, 0x97, 0xb3, - 0x33, 0xc3, 0xbd, 0xe4, 0xb4, 0x9c, 0xf7, 0x5e, 0xbd, 0xf7, 0xba, 0xfb, 0x7d, 0xea, 0x55, 0xbd, - 0xaa, 0x85, 0xcd, 0xba, 0x15, 0x34, 0xda, 0xd5, 0x05, 0xd3, 0x6d, 0x2e, 0x1a, 0x5e, 0xdd, 0x6d, - 0x79, 0xee, 0x87, 0xfc, 0x8f, 0x97, 0xe9, 0x11, 0x75, 0x02, 0x7f, 0xb1, 0x75, 0x50, 0x5f, 0x34, - 0x5a, 0x96, 0xbf, 0x28, 0x7e, 0xbb, 0x6d, 0xcf, 0xa4, 0x8b, 0x47, 0xaf, 0x18, 0x76, 0xab, 0x61, - 0xbc, 0xb2, 0x58, 0xa7, 0x0e, 0xf5, 0x8c, 0x80, 0xd6, 0x16, 0x5a, 0x9e, 0x1b, 0xb8, 0xe4, 0xeb, - 0x11, 0xbb, 0x85, 0x90, 0x1d, 0xff, 0xe3, 0x03, 0x31, 0x7c, 0xa1, 0x75, 0x50, 0x5f, 0x60, 0xec, - 0x16, 0x34, 0x76, 0x0b, 0x21, 0xbb, 0xd9, 0x6f, 0x9c, 0x5b, 0x1b, 0xd3, 0x6d, 0x36, 0x5d, 0x27, - 0x29, 0x7f, 0xf6, 0x65, 0x8d, 0x41, 0xdd, 0xad, 0xbb, 0x8b, 0x1c, 0x5c, 0x6d, 0xef, 0xf3, 0x5f, - 0xfc, 0x07, 0xff, 0x4b, 0x92, 0xcf, 0x1f, 0xbc, 0xea, 0x2f, 0x58, 0x2e, 0x63, 0xb9, 0x68, 0xba, - 0x1e, 0x7b, 0xb0, 0x0e, 0x96, 0xbf, 0x1d, 0xd1, 0x34, 0x0d, 0xb3, 0x61, 0x39, 0xd4, 0x3b, 0x8e, - 0xf4, 0x68, 0xd2, 0xc0, 0x48, 0x1b, 0xb5, 0x78, 0xda, 0x28, 0xaf, 0xed, 0x04, 0x56, 0x93, 0x76, - 0x0c, 0xf8, 0xda, 0xa3, 0x06, 0xf8, 0x66, 0x83, 0x36, 0x8d, 0xe4, 0xb8, 0xf9, 0xff, 0xc9, 0xc0, - 0x4c, 0x69, 0x73, 0x67, 0x7b, 0xc9, 0x75, 0xfc, 0x76, 0x93, 0x2e, 0xb9, 0xce, 0xbe, 0x55, 0x27, - 0xbf, 0x03, 0x63, 0xa6, 0x00, 0x78, 0xbb, 0x46, 0xbd, 0x98, 0xb9, 0x9a, 0xb9, 0x5e, 0x28, 0x5f, - 0xfa, 0xfc, 0xe1, 0xdc, 0x53, 0x27, 0x0f, 0xe7, 0xc6, 0x96, 0x22, 0x14, 0xea, 0x74, 0xe4, 0x06, - 0x8c, 0x1a, 0xed, 0xc0, 0x2d, 0x99, 0x07, 0xc5, 0xa1, 0xab, 0x99, 0xeb, 0xf9, 0xf2, 0x94, 0x1c, - 0x32, 0x5a, 0x12, 0x60, 0x0c, 0xf1, 0x64, 0x11, 0x0a, 0xf4, 0x81, 0x69, 0xb7, 0x7d, 0xeb, 0x88, - 0x16, 0xb3, 0x9c, 0x78, 0x46, 0x12, 0x17, 0x56, 0x42, 0x04, 0x46, 0x34, 0x8c, 0xb7, 0xe3, 0x6e, - 0xb8, 0xa6, 0x61, 0x17, 0x73, 0x71, 0xde, 0x5b, 0x02, 0x8c, 0x21, 0x9e, 0x5c, 0x83, 0x11, 0xc7, - 0xbd, 0x67, 0x58, 0x41, 0x71, 0x98, 0x53, 0x4e, 0x4a, 0xca, 0x91, 0x2d, 0x0e, 0x45, 0x89, 0x9d, - 0xff, 0x31, 0xc0, 0x14, 0x7b, 0xf6, 0x15, 0x66, 0x1c, 0x15, 0x6e, 0x4b, 
0xe4, 0x05, 0xc8, 0xb6, - 0x3d, 0x5b, 0x3e, 0xf1, 0x98, 0x1c, 0x98, 0xdd, 0xc3, 0x0d, 0x64, 0x70, 0xf2, 0x2a, 0x8c, 0xd3, - 0x07, 0x66, 0xc3, 0x70, 0xea, 0x74, 0xcb, 0x68, 0x52, 0xfe, 0x98, 0x85, 0xf2, 0x65, 0x49, 0x37, - 0xbe, 0xa2, 0xe1, 0x30, 0x46, 0xa9, 0x8f, 0xdc, 0x3d, 0x6e, 0x89, 0x67, 0x4e, 0x19, 0xc9, 0x70, - 0x18, 0xa3, 0x24, 0x37, 0x01, 0x3c, 0xb7, 0x1d, 0x58, 0x4e, 0x7d, 0x9d, 0x1e, 0xf3, 0x87, 0x2f, - 0x94, 0x89, 0x1c, 0x07, 0xa8, 0x30, 0xa8, 0x51, 0x91, 0x3f, 0x80, 0x19, 0xd3, 0x75, 0x1c, 0x6a, - 0x06, 0x96, 0xeb, 0x94, 0x0d, 0xf3, 0xc0, 0xdd, 0xdf, 0xe7, 0x6f, 0x63, 0xec, 0xe6, 0xab, 0x0b, - 0xe7, 0x76, 0x32, 0xe1, 0x25, 0x0b, 0x72, 0x7c, 0xf9, 0xe9, 0x93, 0x87, 0x73, 0x33, 0x4b, 0x49, - 0xb6, 0xd8, 0x29, 0x89, 0xbc, 0x04, 0xf9, 0x0f, 0x7d, 0xd7, 0x29, 0xbb, 0xb5, 0xe3, 0xe2, 0x08, - 0xff, 0x06, 0xd3, 0x52, 0xe1, 0xfc, 0x9b, 0x95, 0x3b, 0x5b, 0x0c, 0x8e, 0x8a, 0x82, 0xec, 0x41, - 0x36, 0xb0, 0xfd, 0xe2, 0x28, 0x57, 0xef, 0xb5, 0xae, 0xd5, 0xdb, 0xdd, 0xa8, 0x08, 0xb3, 0x2d, - 0x8f, 0xb2, 0x6f, 0xb5, 0xbb, 0x51, 0x41, 0xc6, 0x8f, 0x7c, 0x2f, 0x03, 0x79, 0xe6, 0x5f, 0x35, - 0x23, 0x30, 0x8a, 0xf9, 0xab, 0xd9, 0xeb, 0x63, 0x37, 0xdf, 0x5d, 0xe8, 0x29, 0xc0, 0x2c, 0x24, - 0xac, 0x65, 0x61, 0x53, 0xb2, 0x5f, 0x71, 0x02, 0xef, 0x38, 0x7a, 0xc6, 0x10, 0x8c, 0x4a, 0x3e, - 0xf9, 0x9b, 0x0c, 0x4c, 0x85, 0x5f, 0x75, 0x99, 0x9a, 0xb6, 0xe1, 0xd1, 0x62, 0x81, 0x3f, 0xf0, - 0x5b, 0xfd, 0xd0, 0x29, 0xce, 0x59, 0xbe, 0x8e, 0x4b, 0x27, 0x0f, 0xe7, 0xa6, 0x12, 0x28, 0x4c, - 0x6a, 0x41, 0x3e, 0xc9, 0xc0, 0xf8, 0x61, 0x9b, 0xb6, 0x95, 0x5a, 0xc0, 0xd5, 0xda, 0xeb, 0x83, - 0x5a, 0x3b, 0x1a, 0x5b, 0xa9, 0xd3, 0x34, 0x33, 0x76, 0x1d, 0x8e, 0x31, 0xe1, 0xe4, 0x3b, 0x50, - 0xe0, 0xbf, 0xcb, 0x96, 0x53, 0x2b, 0x8e, 0x71, 0x4d, 0xb0, 0x5f, 0x9a, 0x30, 0x9e, 0x52, 0x8d, - 0x09, 0x16, 0x67, 0x14, 0x10, 0x23, 0x99, 0xe4, 0x3e, 0x8c, 0xca, 0x90, 0x56, 0x1c, 0xe7, 0xe2, - 0xb7, 0xfb, 0x20, 0x3e, 0x16, 0x5d, 0xcb, 0x63, 0x2c, 0x6a, 0x49, 0x10, 0x86, 0xd2, 0xc8, 0x5b, - 0x90, 0x33, 0xda, 0x41, 0xa3, 0x38, 0xf1, 0x98, 0x6e, 0x50, 0x36, 0x7c, 0xcb, 0x2c, 0xb5, 0x83, - 0x46, 0x39, 0x7f, 0xf2, 0x70, 0x2e, 0xc7, 0xfe, 0x42, 0xce, 0x71, 0xf6, 0x75, 0x98, 0x88, 0x19, - 0x2a, 0x99, 0x86, 0xec, 0x01, 0x3d, 0x16, 0x41, 0x0e, 0xd9, 0x9f, 0xe4, 0x32, 0x0c, 0x1f, 0x19, - 0x76, 0x5b, 0x06, 0x34, 0x14, 0x3f, 0x5e, 0x1b, 0x7a, 0x35, 0x33, 0xff, 0x45, 0x06, 0x9e, 0x3b, - 0xd5, 0xc4, 0x58, 0x54, 0xae, 0xb5, 0x3d, 0xa3, 0x6a, 0x53, 0xce, 0x4d, 0x8b, 0xca, 0xcb, 0x02, - 0x8c, 0x21, 0x9e, 0x85, 0x31, 0x16, 0xfc, 0x97, 0xa9, 0x4d, 0x03, 0x2a, 0xf3, 0x83, 0x0a, 0x63, - 0x25, 0x85, 0x41, 0x8d, 0x8a, 0xc5, 0x11, 0xcb, 0x09, 0xa8, 0xe7, 0x18, 0xb6, 0x4c, 0x12, 0xca, - 0xc7, 0xd6, 0x24, 0x1c, 0x15, 0x85, 0x16, 0xf7, 0x73, 0x67, 0xc6, 0xfd, 0xaf, 0xc3, 0xa5, 0x14, - 0x9b, 0xd0, 0x86, 0x67, 0xce, 0x1c, 0xfe, 0x5f, 0x19, 0x78, 0x26, 0xdd, 0xba, 0xc9, 0x55, 0xc8, - 0x39, 0x2c, 0x2d, 0x88, 0xf4, 0x31, 0x2e, 0x19, 0xe4, 0x78, 0x3a, 0xe0, 0x18, 0xfd, 0x85, 0x0d, - 0x75, 0xf5, 0xc2, 0xb2, 0xe7, 0x7a, 0x61, 0xb1, 0xb4, 0x9a, 0x3b, 0x47, 0x5a, 0x3d, 0x6f, 0xae, - 0xfc, 0x61, 0x0e, 0x9e, 0x2b, 0x7d, 0xd4, 0xf6, 0x28, 0x0f, 0x7f, 0xfe, 0xed, 0x76, 0x55, 0xcf, - 0x9a, 0x57, 0x21, 0xb7, 0x7f, 0x58, 0x73, 0x92, 0xcf, 0x7d, 0x6b, 0x67, 0x79, 0x0b, 0x39, 0x86, - 0xb4, 0xe0, 0x92, 0xdf, 0x30, 0x3c, 0x5a, 0x2b, 0x99, 0x26, 0xf5, 0xfd, 0x75, 0x7a, 0xac, 0xf2, - 0xe7, 0xd8, 0xcd, 0x5f, 0x5f, 0x10, 0xb3, 0x17, 0x66, 0xcf, 0x0b, 0x6c, 0x22, 0xb5, 0x70, 0xf4, - 0xca, 0x42, 0x85, 0x9a, 0x1e, 0x0d, 0xd6, 0xe9, 0x71, 0x85, 0xda, 0xd4, 0x0c, 0x5c, 0xaf, 0xfc, - 
0xec, 0xc9, 0xc3, 0xb9, 0x4b, 0x95, 0x4e, 0x2e, 0x98, 0xc6, 0x9a, 0xd4, 0x60, 0x2a, 0x01, 0xe6, - 0xef, 0xf0, 0xdc, 0xd2, 0x78, 0xf4, 0x4c, 0x48, 0xc3, 0x24, 0x4b, 0xf6, 0x3d, 0x1b, 0xed, 0x2a, - 0x7f, 0x16, 0x91, 0x99, 0xd5, 0xf7, 0xbc, 0x2d, 0xc0, 0x18, 0xe2, 0xc9, 0x0f, 0xf5, 0x7c, 0x34, - 0xcc, 0xf3, 0xd1, 0x7e, 0xaf, 0xb1, 0xe5, 0xb4, 0x2f, 0x72, 0xfe, 0xcc, 0xd4, 0x5b, 0x74, 0xf8, - 0x55, 0x0e, 0x2e, 0x2d, 0x19, 0x36, 0x75, 0x6a, 0x86, 0xa7, 0x1b, 0xc4, 0x4b, 0x90, 0x67, 0x13, - 0xce, 0x5a, 0xdb, 0x0e, 0x9d, 0x41, 0xa9, 0x50, 0x91, 0x70, 0x54, 0x14, 0xca, 0xcd, 0x8f, 0x0c, - 0x5b, 0xce, 0xa8, 0xe2, 0x6e, 0x7e, 0xa4, 0xdc, 0xfc, 0xc8, 0xb0, 0xc9, 0x6b, 0x30, 0x29, 0xed, - 0xd7, 0x75, 0x96, 0x8d, 0x80, 0xfa, 0xc5, 0xec, 0xd5, 0x2c, 0x9b, 0x13, 0x9d, 0x3c, 0x9c, 0x9b, - 0x5c, 0x89, 0x61, 0x30, 0x41, 0xc9, 0x24, 0xb1, 0xd9, 0xf0, 0x47, 0xae, 0x13, 0x7e, 0x2f, 0x25, - 0x69, 0x57, 0xc2, 0x51, 0x51, 0x90, 0x4d, 0x18, 0x6b, 0xfb, 0xd4, 0xdb, 0x36, 0x8e, 0x6d, 0xd7, - 0xa8, 0x71, 0x0f, 0x19, 0x2f, 0x7f, 0x95, 0x4d, 0x81, 0xf7, 0x22, 0xf0, 0x2f, 0x1f, 0xce, 0x15, - 0xa9, 0x63, 0xba, 0x35, 0xcb, 0xa9, 0x2f, 0xb2, 0x39, 0xcd, 0x02, 0x1a, 0xf7, 0x37, 0xa9, 0xef, - 0x1b, 0x75, 0x8a, 0xfa, 0x78, 0xf2, 0x7d, 0xdd, 0x00, 0x46, 0xb8, 0x01, 0x7c, 0xab, 0x47, 0x03, - 0x48, 0x79, 0xf7, 0x5d, 0x4c, 0x4a, 0xfe, 0x30, 0x03, 0x63, 0x2d, 0xea, 0xf9, 0x96, 0x1f, 0x50, - 0xc7, 0xa4, 0x72, 0x06, 0x76, 0xa7, 0x47, 0x9d, 0xb8, 0x2e, 0xdb, 0x11, 0xdb, 0xf2, 0x14, 0x7b, - 0x63, 0x1a, 0x00, 0x75, 0xa1, 0xbd, 0xd9, 0xdf, 0x03, 0xb8, 0xbc, 0x64, 0x04, 0x66, 0xa3, 0xdd, - 0x12, 0x11, 0xb8, 0xed, 0x19, 0x6c, 0x1e, 0xca, 0xdc, 0x92, 0x3a, 0x2c, 0x8a, 0xd6, 0x92, 0x79, - 0x69, 0x45, 0x80, 0x31, 0xc4, 0xb3, 0x5a, 0xa7, 0x69, 0x3c, 0x58, 0x96, 0x23, 0xa5, 0xfd, 0xa9, - 0x5a, 0x67, 0x33, 0x42, 0xa1, 0x4e, 0x37, 0xff, 0x6d, 0xb8, 0x2c, 0x44, 0x6e, 0x1a, 0x2d, 0xed, - 0xd9, 0xce, 0x91, 0x02, 0x96, 0x61, 0xda, 0xf4, 0xa8, 0x11, 0xd0, 0xb5, 0xfd, 0x2d, 0x37, 0x58, - 0x79, 0x60, 0xf9, 0x81, 0xcc, 0x05, 0x45, 0x49, 0x3d, 0xbd, 0x94, 0xc0, 0x63, 0xc7, 0x88, 0xf9, - 0x1f, 0x8d, 0x00, 0x59, 0x69, 0x5a, 0x41, 0x40, 0x63, 0x8e, 0x77, 0x0d, 0x46, 0xaa, 0x9e, 0x7b, - 0x40, 0x3d, 0xa9, 0x80, 0x8a, 0xe7, 0x65, 0x0e, 0x45, 0x89, 0x65, 0xc9, 0x85, 0xe5, 0x73, 0x87, - 0xda, 0x2c, 0x30, 0x0e, 0xc5, 0x8b, 0x8a, 0x25, 0x85, 0x41, 0x8d, 0x8a, 0x57, 0x85, 0xe2, 0x17, - 0x8f, 0x77, 0xd9, 0x44, 0x55, 0x18, 0xa1, 0x50, 0xa7, 0x23, 0x77, 0x20, 0xcf, 0xbc, 0xc0, 0x09, - 0x63, 0xe4, 0xb9, 0x23, 0xf0, 0x38, 0x33, 0xdb, 0x3d, 0x39, 0x14, 0x15, 0x13, 0xc6, 0xb0, 0x65, - 0xf8, 0xfe, 0x7d, 0xd7, 0xab, 0xc9, 0x9a, 0xa6, 0x1b, 0x86, 0xdb, 0x72, 0x28, 0x2a, 0x26, 0xe9, - 0xd5, 0xd2, 0xc8, 0x13, 0xa9, 0x96, 0x46, 0xcf, 0x5b, 0x2d, 0xe5, 0xfb, 0x5c, 0x2d, 0x7d, 0xaa, - 0x07, 0xa7, 0x02, 0x0f, 0x4e, 0x1f, 0xf4, 0x1a, 0x08, 0x3a, 0xcc, 0xf3, 0xa2, 0xd2, 0xd2, 0x67, - 0x43, 0x30, 0x9d, 0x0c, 0x43, 0xe4, 0x23, 0x18, 0x35, 0x45, 0xac, 0xe0, 0x4c, 0xc6, 0x6e, 0x56, - 0x7a, 0x0e, 0xbe, 0x9d, 0x91, 0x47, 0x4e, 0xee, 0x05, 0x06, 0x43, 0x81, 0xe4, 0xe3, 0x0c, 0x14, - 0xcc, 0x30, 0x5c, 0xc8, 0x59, 0x4f, 0xcf, 0xe2, 0x53, 0xc2, 0x8f, 0x28, 0x6c, 0x14, 0x06, 0x23, - 0xa1, 0xf3, 0x3f, 0x1f, 0x82, 0x31, 0x3d, 0x52, 0x7c, 0x4b, 0xfb, 0xde, 0xe2, 0x7d, 0xfc, 0xa6, - 0xe6, 0x45, 0x6a, 0x11, 0x29, 0x52, 0x82, 0x51, 0x33, 0xbf, 0xba, 0x53, 0xfd, 0x90, 0x9a, 0x01, - 0xfb, 0x38, 0x51, 0xc4, 0x88, 0x60, 0x5a, 0x7a, 0x69, 0x41, 0xce, 0x6f, 0x51, 0x53, 0x3e, 0xee, - 0x56, 0x3f, 0xd2, 0x8a, 0xd0, 0xbd, 0xd2, 0xa2, 0x66, 0x14, 0x5a, 0xd9, 0x2f, 0xe4, 0x92, 0xc8, - 0x03, 0x18, 0xf1, 0x03, 
0x23, 0x68, 0xfb, 0x72, 0xaa, 0xb7, 0xdd, 0x47, 0x99, 0x9c, 0x6f, 0x14, - 0x4f, 0xc5, 0x6f, 0x94, 0xf2, 0xe6, 0x7f, 0x91, 0x81, 0x29, 0x8d, 0x7a, 0xc3, 0xf2, 0x03, 0xf2, - 0x6e, 0xc7, 0x1b, 0x5e, 0x38, 0xdf, 0x1b, 0x66, 0xa3, 0xf9, 0xfb, 0x55, 0x0e, 0x12, 0x42, 0xb4, - 0xb7, 0xeb, 0xc2, 0xb0, 0x15, 0xd0, 0xa6, 0x5f, 0x1c, 0xe2, 0xce, 0xfa, 0x66, 0xff, 0x1e, 0xb5, - 0x3c, 0x21, 0xc5, 0x0e, 0xaf, 0x31, 0x01, 0x28, 0xe4, 0xcc, 0xff, 0xe8, 0x6b, 0xb1, 0x47, 0x64, - 0xaf, 0x9d, 0xaf, 0x6a, 0x31, 0x50, 0xb9, 0xed, 0x6f, 0x45, 0x59, 0x2f, 0x5a, 0xd5, 0xd2, 0x70, - 0x18, 0xa3, 0x24, 0x87, 0x90, 0x0f, 0x68, 0xb3, 0x65, 0x1b, 0x41, 0x58, 0x05, 0xac, 0xf6, 0xf8, - 0x04, 0xbb, 0x92, 0x9d, 0x08, 0xf3, 0xe1, 0x2f, 0x54, 0x62, 0x48, 0x13, 0x46, 0x7d, 0xea, 0x1d, - 0x59, 0x26, 0x95, 0xe6, 0x71, 0xab, 0x47, 0x89, 0x15, 0xc1, 0x4d, 0xf8, 0xbc, 0xfc, 0x81, 0xa1, - 0x0c, 0xb2, 0x08, 0xa3, 0x1e, 0x6d, 0xd9, 0x96, 0x69, 0xf0, 0xb4, 0x37, 0x2c, 0x32, 0xc2, 0x32, - 0x6d, 0x79, 0xd4, 0x34, 0x02, 0x5a, 0x43, 0x81, 0xc4, 0x90, 0x8a, 0x7c, 0x1b, 0x86, 0x9b, 0x96, - 0x63, 0xb9, 0xb2, 0x38, 0x78, 0xbb, 0xbf, 0x0e, 0xb3, 0xb0, 0xc9, 0x78, 0x8b, 0xc0, 0xab, 0x3e, - 0x30, 0x87, 0xa1, 0x10, 0xcb, 0x17, 0xcc, 0x4c, 0x39, 0xa1, 0x94, 0xf3, 0xd3, 0x77, 0xfb, 0xac, - 0x83, 0x9a, 0xaf, 0xc6, 0xe3, 0x7f, 0x08, 0x46, 0x25, 0x9f, 0x7c, 0x04, 0xb9, 0x7d, 0xcb, 0x66, - 0x73, 0xd2, 0x6c, 0x1f, 0x16, 0xc9, 0x92, 0x7a, 0xdc, 0xb2, 0x6c, 0x2a, 0x74, 0x88, 0x8a, 0x55, - 0xcb, 0xa6, 0xc8, 0x65, 0xf2, 0x17, 0xe1, 0x51, 0xc1, 0xa3, 0x4f, 0x2b, 0x87, 0x49, 0x05, 0x50, - 0xb2, 0x4f, 0xbc, 0x88, 0x10, 0x8c, 0x4a, 0x3e, 0xf9, 0xe3, 0x0c, 0x8c, 0xde, 0xa7, 0xd5, 0x86, - 0xeb, 0x1e, 0xc8, 0xbc, 0xfc, 0x4e, 0x9f, 0x75, 0xb9, 0x27, 0xb8, 0x0b, 0x55, 0xd4, 0x44, 0x59, - 0x42, 0x31, 0x14, 0xce, 0xbe, 0x88, 0xd1, 0x3c, 0x6c, 0x15, 0x61, 0x20, 0x5f, 0xa4, 0xd4, 0x3c, - 0x6c, 0x25, 0xbe, 0x48, 0x69, 0x73, 0x67, 0x1b, 0xb9, 0x4c, 0xe6, 0x1a, 0x07, 0xc6, 0xfe, 0x81, - 0x51, 0x1c, 0x1b, 0x88, 0x6b, 0xac, 0x33, 0xde, 0x09, 0xd7, 0xe0, 0x30, 0x14, 0x62, 0xd9, 0xb3, - 0x37, 0x0f, 0x83, 0xa0, 0x38, 0x3e, 0x90, 0x67, 0xdf, 0x3c, 0x0c, 0x82, 0xc4, 0xb3, 0x6f, 0xee, - 0xec, 0xee, 0x22, 0x97, 0xc9, 0x64, 0x3b, 0x46, 0xe0, 0x17, 0x27, 0x06, 0x22, 0x7b, 0xcb, 0x08, - 0xfc, 0x84, 0xec, 0xad, 0xd2, 0x6e, 0x05, 0xb9, 0x4c, 0x72, 0x04, 0x59, 0xdf, 0xf1, 0x8b, 0x93, - 0x5c, 0xf4, 0xbd, 0x3e, 0x8b, 0xae, 0x38, 0x52, 0xb2, 0xda, 0x67, 0xa9, 0x6c, 0x55, 0x90, 0x09, - 0xe4, 0x72, 0x0f, 0xfd, 0xe2, 0xd4, 0x60, 0xe4, 0x1e, 0x76, 0xc8, 0xdd, 0x61, 0x72, 0x0f, 0x7d, - 0x56, 0x11, 0x8f, 0xb4, 0xda, 0xd5, 0x4a, 0xbb, 0x5a, 0x9c, 0xe6, 0xb2, 0xbf, 0xd9, 0x67, 0xd9, - 0xdb, 0x9c, 0xb9, 0x10, 0xaf, 0xe6, 0x12, 0x02, 0x88, 0x52, 0x32, 0x57, 0x42, 0x48, 0x2d, 0xce, - 0x0c, 0x44, 0x89, 0x55, 0xce, 0x2d, 0xa1, 0x84, 0x00, 0xa2, 0x94, 0x1c, 0x2a, 0x61, 0x1b, 0xd5, - 0x22, 0x19, 0x94, 0x12, 0xb6, 0x91, 0xa2, 0x84, 0x6d, 0x08, 0x25, 0x6c, 0xa3, 0xca, 0x4c, 0xbf, - 0x51, 0xdb, 0xf7, 0x8b, 0x97, 0x06, 0x62, 0xfa, 0xb7, 0x6b, 0xfb, 0x49, 0xd3, 0xbf, 0xbd, 0x7c, - 0xab, 0x82, 0x5c, 0x26, 0x0b, 0x39, 0xbe, 0x6d, 0x98, 0x07, 0xc5, 0xcb, 0x03, 0x09, 0x39, 0x15, - 0xc6, 0x3b, 0x11, 0x72, 0x38, 0x0c, 0x85, 0x58, 0xf2, 0xd7, 0x19, 0x18, 0xf3, 0x03, 0xd7, 0x33, - 0xea, 0x74, 0xd5, 0xb3, 0x6a, 0xc5, 0xa7, 0xfb, 0x53, 0x93, 0x25, 0xd5, 0x88, 0x24, 0x08, 0x65, - 0x54, 0x3d, 0xaf, 0x61, 0x50, 0x57, 0x84, 0xfc, 0x38, 0x03, 0x93, 0x46, 0x6c, 0xe1, 0xb1, 0xf8, - 0x0c, 0xd7, 0xad, 0xda, 0xef, 0x94, 0x10, 0x5f, 0xdd, 0xe4, 0xea, 0x3d, 0x23, 0xd5, 0x9b, 0x8c, - 0x23, 0x31, 0xa1, 0x11, 0x37, 0x5f, 0x3f, 0xf0, 
0xac, 0x16, 0x2d, 0x3e, 0x3b, 0x10, 0xf3, 0xad, - 0x70, 0xe6, 0x09, 0xf3, 0x15, 0x40, 0x94, 0x92, 0x79, 0xea, 0xa6, 0xa2, 0x08, 0x2e, 0x16, 0x07, - 0x92, 0xba, 0xc3, 0x12, 0x3b, 0x9e, 0xba, 0x25, 0x14, 0x43, 0xe1, 0xcc, 0x96, 0x3d, 0x5a, 0xb3, - 0xfc, 0xe2, 0x73, 0x03, 0xb1, 0x65, 0x64, 0xbc, 0x13, 0xb6, 0xcc, 0x61, 0x28, 0xc4, 0xb2, 0x70, - 0xee, 0xf8, 0x87, 0xc5, 0xd9, 0x81, 0x84, 0xf3, 0x2d, 0xff, 0x30, 0x11, 0xce, 0xb7, 0x2a, 0x3b, - 0xc8, 0x04, 0xca, 0x70, 0x6e, 0xfb, 0x86, 0x57, 0xfc, 0xca, 0x80, 0xc2, 0x39, 0x63, 0xde, 0x11, - 0xce, 0x19, 0x10, 0xa5, 0x64, 0x6e, 0x05, 0xbc, 0xed, 0xc2, 0x32, 0x8b, 0xcf, 0x0f, 0xc4, 0x0a, - 0x56, 0x05, 0xf7, 0x84, 0x15, 0x48, 0x28, 0x86, 0xc2, 0xc9, 0x75, 0x36, 0xab, 0xe5, 0xa5, 0x86, - 0x5f, 0x7c, 0x81, 0x57, 0x24, 0xe3, 0x62, 0xce, 0x29, 0x60, 0xa8, 0xb0, 0xb3, 0x6d, 0x80, 0xa8, - 0x5a, 0x48, 0x59, 0x79, 0xd9, 0xd1, 0x57, 0x5e, 0xc6, 0x6e, 0xbe, 0xde, 0xf5, 0x2a, 0x54, 0xe5, - 0xb7, 0x4a, 0x5e, 0x60, 0xed, 0x1b, 0x66, 0xa0, 0x2d, 0xdb, 0xcc, 0xfe, 0x79, 0x06, 0x26, 0x62, - 0x15, 0x42, 0x8a, 0xe8, 0x46, 0x5c, 0x34, 0xf6, 0x7f, 0x01, 0x5d, 0xd7, 0xe8, 0x4f, 0x32, 0x50, - 0x50, 0xb5, 0x42, 0x8a, 0x36, 0xb5, 0xb8, 0x36, 0xbd, 0xae, 0x71, 0x70, 0x51, 0xe9, 0x9a, 0xb0, - 0x77, 0x13, 0x2b, 0x1a, 0x06, 0xff, 0x6e, 0x94, 0xb8, 0x74, 0x8d, 0xbe, 0x9b, 0x81, 0x71, 0xbd, - 0x74, 0x48, 0x51, 0xc8, 0x8c, 0x2b, 0xb4, 0xd9, 0xa3, 0x42, 0x52, 0xda, 0x92, 0xeb, 0x04, 0xf4, - 0x41, 0x90, 0xfc, 0x4e, 0xaa, 0x82, 0x18, 0xfc, 0x77, 0x4a, 0xf4, 0x81, 0x24, 0xde, 0x0a, 0x44, - 0xe5, 0x44, 0x8a, 0x2a, 0x34, 0xae, 0x4a, 0xaf, 0xbb, 0x2d, 0x42, 0xd6, 0xe9, 0xd6, 0xab, 0x6a, - 0x8b, 0xc1, 0xbf, 0x15, 0x56, 0xb3, 0x9c, 0xa2, 0xc9, 0x9f, 0x66, 0xa0, 0xa0, 0x2a, 0x8d, 0xc1, - 0xbf, 0x14, 0x56, 0xc1, 0x88, 0xb9, 0x40, 0xa7, 0x2a, 0x7f, 0x94, 0x81, 0x7c, 0x58, 0x79, 0x0c, - 0xde, 0x64, 0x2b, 0x5b, 0x95, 0x53, 0x5e, 0x09, 0xd7, 0xe3, 0xf0, 0xc2, 0xf4, 0xd8, 0x39, 0x4d, - 0x8f, 0x4f, 0x32, 0x30, 0xa6, 0x55, 0x25, 0x29, 0xaa, 0xec, 0xc7, 0x55, 0xe9, 0x75, 0x51, 0x55, - 0x0a, 0x3b, 0x5d, 0x1b, 0xad, 0x3c, 0x19, 0xbc, 0x36, 0x52, 0xd8, 0x99, 0xda, 0x84, 0x75, 0xca, - 0x85, 0x68, 0xc3, 0x84, 0x9d, 0xee, 0xce, 0xaa, 0x66, 0x19, 0xbc, 0x3b, 0xb3, 0x5a, 0xe8, 0x8c, - 0x20, 0x17, 0x15, 0x30, 0x83, 0xf7, 0x67, 0x21, 0x2b, 0x5d, 0x97, 0xbf, 0xca, 0xc0, 0x74, 0xb2, - 0x8a, 0x49, 0xd1, 0xe8, 0x20, 0xae, 0x51, 0xaf, 0xed, 0x6d, 0xba, 0xc4, 0x74, 0xbd, 0xfe, 0x3e, - 0x03, 0x97, 0x52, 0x2a, 0x98, 0x14, 0xd5, 0x9c, 0xb8, 0x6a, 0x6f, 0x0d, 0xaa, 0x29, 0x24, 0x69, - 0xd9, 0x5a, 0x09, 0x33, 0x78, 0xcb, 0x96, 0xc2, 0xd2, 0xb5, 0xf9, 0x34, 0x03, 0xe3, 0x7a, 0x29, - 0x93, 0xa2, 0x4e, 0x3d, 0xae, 0xce, 0x4e, 0xdf, 0xf7, 0x26, 0x93, 0xf6, 0x1d, 0x15, 0x35, 0x83, - 0xb7, 0x6f, 0x21, 0xeb, 0xf4, 0x3c, 0x11, 0x96, 0x38, 0x83, 0xcf, 0x13, 0x5b, 0x95, 0x9d, 0x33, - 0xf3, 0x84, 0x2a, 0x77, 0x2e, 0x22, 0x4f, 0x70, 0x61, 0xa7, 0x5b, 0x8c, 0x5e, 0xf6, 0x0c, 0xde, - 0x62, 0x42, 0x69, 0xa9, 0xfa, 0xcc, 0x07, 0x30, 0xd3, 0xb1, 0x59, 0x48, 0x3e, 0x50, 0xdb, 0x91, - 0x62, 0xfb, 0xef, 0x77, 0xbb, 0xaf, 0x93, 0xce, 0xde, 0x75, 0xfc, 0x69, 0x16, 0xa6, 0x12, 0x35, - 0x03, 0x6f, 0x01, 0x64, 0x3f, 0x79, 0x97, 0xb9, 0xd8, 0x8f, 0x8b, 0x5a, 0x00, 0x43, 0x04, 0x46, - 0x34, 0xe4, 0xb3, 0x0c, 0x4c, 0xdd, 0x37, 0x02, 0xb3, 0xb1, 0x6d, 0x04, 0x0d, 0xb1, 0x75, 0xdc, - 0xa7, 0x0c, 0x72, 0x2f, 0xce, 0xb5, 0xfc, 0xac, 0xd4, 0x63, 0x2a, 0x81, 0xc0, 0xa4, 0x7c, 0x72, - 0x03, 0x46, 0x5b, 0xae, 0x6d, 0x5b, 0x4e, 0x5d, 0x36, 0x3e, 0xaa, 0xaa, 0x76, 0x5b, 0x80, 0x31, - 0xc4, 0xc7, 0xdb, 0xbc, 0x73, 0x7d, 0xd9, 0xac, 0x49, 0xbc, 0xd2, 0x8b, 
0xea, 0x5a, 0xf8, 0x79, - 0x16, 0x48, 0xa7, 0x95, 0x3d, 0xea, 0x48, 0xc2, 0x35, 0x18, 0x31, 0xa3, 0x8f, 0xa6, 0x75, 0xfc, - 0xc8, 0x77, 0x2b, 0xb1, 0xa2, 0xc9, 0xce, 0xa7, 0x66, 0xdb, 0xa3, 0x9d, 0xbd, 0xb4, 0x02, 0x8e, - 0x8a, 0x22, 0xd6, 0x93, 0x92, 0x7b, 0x64, 0x4f, 0xca, 0xa7, 0x9d, 0xad, 0x8d, 0x1f, 0xf4, 0xdd, - 0xdd, 0xba, 0x68, 0x6c, 0xdb, 0xe3, 0xad, 0xb3, 0x0d, 0xd1, 0x01, 0x24, 0x3b, 0x79, 0xce, 0xd9, - 0x23, 0x34, 0x29, 0xbb, 0x6b, 0xe5, 0x60, 0xd4, 0x18, 0xf5, 0xf6, 0x75, 0xff, 0xae, 0x00, 0x33, - 0x1d, 0x93, 0x4d, 0x32, 0x0b, 0x43, 0x96, 0xe8, 0x51, 0xcb, 0x96, 0x41, 0x3e, 0xd1, 0xd0, 0xda, - 0x32, 0x0e, 0x59, 0x35, 0x12, 0x44, 0x1b, 0x7f, 0x83, 0xa8, 0x9f, 0xc5, 0xb6, 0x75, 0xc7, 0x36, - 0xdf, 0xcb, 0x30, 0xec, 0xde, 0x77, 0xa8, 0x27, 0xfb, 0xbb, 0x94, 0xcf, 0x46, 0x1b, 0xd7, 0x77, - 0x18, 0x1a, 0x05, 0x15, 0xf9, 0x7d, 0x00, 0x8f, 0xb6, 0x5c, 0xdf, 0x0a, 0x5c, 0x2f, 0x3c, 0x9d, - 0xf2, 0xbc, 0x1c, 0x73, 0x39, 0xb6, 0xd9, 0x2d, 0x69, 0x50, 0xa3, 0x27, 0xf3, 0x30, 0x22, 0x34, - 0xe5, 0x56, 0x53, 0x28, 0x03, 0x33, 0x5c, 0x31, 0x77, 0x41, 0x89, 0x21, 0x77, 0x20, 0x6f, 0xb4, - 0xac, 0x5d, 0xf7, 0x80, 0x3a, 0xdd, 0x7d, 0x4a, 0xbe, 0xba, 0x55, 0xda, 0x5e, 0xe3, 0x43, 0x51, - 0x31, 0x21, 0xef, 0xc3, 0x84, 0x7c, 0x58, 0x69, 0x20, 0xa3, 0xdd, 0x70, 0x9d, 0x39, 0x79, 0x38, - 0x37, 0x71, 0x4f, 0x1f, 0x8f, 0x71, 0x76, 0x31, 0x4f, 0xcb, 0x3f, 0xd2, 0xd3, 0xae, 0xc1, 0x88, - 0x61, 0x06, 0xd6, 0x91, 0x38, 0x0f, 0xa2, 0x75, 0x60, 0x97, 0x38, 0x14, 0x25, 0x56, 0x9e, 0xc9, - 0x0a, 0xc2, 0xc8, 0x0e, 0x1d, 0x67, 0xb2, 0x42, 0x14, 0xea, 0x74, 0xe4, 0x75, 0x98, 0x10, 0x46, - 0x53, 0x36, 0x7c, 0xba, 0x87, 0x1b, 0xfc, 0x50, 0x45, 0xa1, 0xfc, 0xb4, 0x1c, 0x38, 0xb1, 0xaa, - 0x23, 0x31, 0x4e, 0x4b, 0x4a, 0x30, 0x25, 0x00, 0x7b, 0x2d, 0xdb, 0x35, 0x6a, 0x6c, 0xf8, 0x78, - 0xdc, 0x2a, 0x56, 0xe3, 0x68, 0x4c, 0xd2, 0x93, 0x37, 0x81, 0xd4, 0x78, 0x6f, 0xfa, 0x6d, 0xd7, - 0x3d, 0xb8, 0xe3, 0xdc, 0xb2, 0x1c, 0xcb, 0x17, 0x87, 0x1c, 0xf2, 0xe5, 0x59, 0xc9, 0x85, 0x2c, - 0x77, 0x50, 0x60, 0xca, 0x28, 0xf2, 0x67, 0x7a, 0x98, 0x11, 0x7b, 0x92, 0xef, 0xf7, 0xbb, 0xfc, - 0xeb, 0x22, 0xca, 0x7c, 0x2f, 0x03, 0xe3, 0xca, 0x96, 0x2d, 0x1a, 0x6e, 0x57, 0xf6, 0x3a, 0xef, - 0x61, 0xee, 0x15, 0xf9, 0x8b, 0x45, 0xfd, 0xa8, 0x9f, 0x46, 0x87, 0x62, 0x4c, 0x76, 0x6f, 0xb1, - 0xe9, 0x64, 0x98, 0xc7, 0xa6, 0x78, 0xe9, 0xa9, 0xc7, 0x9f, 0xcc, 0xc5, 0xc5, 0x9f, 0x45, 0x28, - 0x30, 0xb6, 0xd4, 0x0c, 0xd6, 0x96, 0x65, 0x4a, 0x53, 0xf3, 0x97, 0xed, 0x10, 0x81, 0x11, 0x8d, - 0x16, 0x43, 0xb2, 0xa7, 0xc6, 0x90, 0xb7, 0x60, 0xcc, 0xe0, 0x3d, 0xfb, 0x22, 0x8c, 0x74, 0xd5, - 0x86, 0xca, 0xdb, 0x97, 0x4b, 0xd1, 0x68, 0xd4, 0x59, 0x91, 0x0a, 0x3c, 0x2d, 0x3a, 0x89, 0x2b, - 0x95, 0x8d, 0xbb, 0xd4, 0xb3, 0xf6, 0x2d, 0x53, 0x34, 0x12, 0x8b, 0xf3, 0x14, 0x2f, 0x48, 0xd5, - 0x9f, 0x5e, 0x49, 0x23, 0xc2, 0xf4, 0xb1, 0xd2, 0x69, 0x6d, 0x43, 0x39, 0xed, 0x48, 0x87, 0xd3, - 0x46, 0x48, 0x8c, 0xd3, 0x9e, 0xe2, 0x71, 0xf9, 0xde, 0x3d, 0xae, 0xd0, 0x2f, 0x8f, 0x8b, 0xdb, - 0xd9, 0x45, 0x4d, 0xaf, 0x7e, 0x92, 0x87, 0xa9, 0xc4, 0x9a, 0x46, 0xea, 0xdc, 0x37, 0xf3, 0x84, - 0xe7, 0xbe, 0x57, 0x21, 0x17, 0xb0, 0x08, 0x3f, 0x14, 0xef, 0x20, 0xe7, 0xa1, 0x9d, 0x63, 0x98, - 0x79, 0x98, 0x0d, 0x6a, 0x1e, 0x84, 0x87, 0x23, 0x64, 0xaa, 0x56, 0xe6, 0xb1, 0xa4, 0x23, 0x31, - 0x4e, 0x4b, 0xbe, 0x0a, 0x05, 0xa3, 0x56, 0xf3, 0xa8, 0xef, 0x53, 0x9f, 0xcf, 0x97, 0x0b, 0xa2, - 0x69, 0xb4, 0x14, 0x02, 0x31, 0xc2, 0xb3, 0x54, 0xd6, 0xa8, 0xed, 0xfb, 0x7b, 0x3e, 0xf5, 0xb8, - 0x41, 0x6b, 0xe7, 0x25, 0xd8, 0xab, 0x64, 0x70, 0x54, 0x14, 0xa4, 0x06, 0x53, 0x07, 0x5e, 0x75, - 
0x69, 0xc9, 0x30, 0x1b, 0xf4, 0x71, 0xe6, 0x5e, 0xfc, 0xc8, 0xcd, 0x7a, 0x9c, 0x03, 0x26, 0x59, - 0x4a, 0x29, 0xeb, 0xf4, 0x38, 0x30, 0xaa, 0x8f, 0x93, 0xc0, 0x43, 0x29, 0x3a, 0x07, 0x4c, 0xb2, - 0x64, 0xe9, 0xf6, 0xc0, 0xab, 0x86, 0xdd, 0xe7, 0xdc, 0x7d, 0xb4, 0x74, 0xbb, 0x1e, 0xa1, 0x50, - 0xa7, 0x63, 0x2f, 0xec, 0xc0, 0xab, 0x22, 0x35, 0xec, 0x26, 0xcf, 0xe7, 0xda, 0x0b, 0x5b, 0x97, - 0x70, 0x54, 0x14, 0xa4, 0x05, 0x84, 0x3d, 0x1d, 0xff, 0xee, 0xaa, 0x69, 0x57, 0x1e, 0xc0, 0xbc, - 0x9e, 0xf6, 0x34, 0x8a, 0x48, 0x7f, 0xa0, 0x67, 0x98, 0x43, 0xaf, 0x77, 0xf0, 0xc1, 0x14, 0xde, - 0xe4, 0x6d, 0x78, 0xf6, 0xc0, 0xab, 0xca, 0x5e, 0xc5, 0x6d, 0xcf, 0x72, 0x4c, 0xab, 0x65, 0x88, - 0x7e, 0x7e, 0x31, 0x31, 0x98, 0x93, 0xea, 0x3e, 0xbb, 0x9e, 0x4e, 0x86, 0xa7, 0x8d, 0x8f, 0x17, - 0x62, 0xe3, 0x7d, 0x29, 0xc4, 0x12, 0xee, 0x7a, 0x51, 0x91, 0xe2, 0x9f, 0x33, 0x40, 0xf8, 0xbe, - 0x4a, 0x78, 0xd2, 0x7d, 0xd5, 0x73, 0xdb, 0x2d, 0x96, 0x99, 0xea, 0xec, 0x0f, 0xad, 0xd3, 0x55, - 0x65, 0xa6, 0xd5, 0x10, 0x81, 0x11, 0x0d, 0x9b, 0xda, 0xb9, 0x76, 0x8d, 0xaa, 0xf3, 0x1d, 0x6a, - 0x6a, 0x77, 0x87, 0x43, 0x51, 0x62, 0xc9, 0x2a, 0xcc, 0x78, 0xb4, 0x6a, 0xd8, 0x86, 0x63, 0xd2, - 0x4a, 0xe0, 0x19, 0x01, 0xad, 0x1f, 0x4b, 0x9f, 0x7e, 0x4e, 0x0e, 0x99, 0xc1, 0x24, 0x01, 0x76, - 0x8e, 0x99, 0xff, 0x97, 0x51, 0x98, 0x4e, 0x6e, 0x08, 0x3d, 0xaa, 0x7e, 0x64, 0xf9, 0xd6, 0xf0, - 0x02, 0x4b, 0x3b, 0xfd, 0x12, 0xe5, 0xdb, 0x10, 0x81, 0x11, 0x0d, 0x79, 0x11, 0x86, 0x03, 0xb7, - 0x65, 0x99, 0x52, 0x43, 0xb5, 0xe3, 0xbf, 0xcb, 0x80, 0x28, 0x70, 0xe9, 0x47, 0x2a, 0x72, 0x17, - 0x76, 0xa4, 0x42, 0x1e, 0x92, 0x18, 0xee, 0xf3, 0x21, 0x89, 0xee, 0xce, 0xb5, 0x7f, 0xa2, 0x3b, - 0x84, 0xe8, 0x63, 0x7d, 0xaf, 0xcf, 0xbb, 0x7d, 0xdd, 0xcd, 0x56, 0x27, 0x4c, 0xdd, 0x9e, 0xe5, - 0x11, 0x92, 0x9d, 0x7e, 0xa8, 0x14, 0x73, 0x14, 0x51, 0x22, 0xc5, 0x40, 0x18, 0x17, 0x4d, 0xb6, - 0xe1, 0xb2, 0x6d, 0x35, 0xad, 0x40, 0x4c, 0xd3, 0xb6, 0xa9, 0x57, 0xa1, 0xa6, 0xeb, 0xd4, 0x78, - 0xc8, 0xcc, 0x46, 0xf5, 0xe3, 0x46, 0x0a, 0x0d, 0xa6, 0x8e, 0x24, 0x37, 0x60, 0xf4, 0x88, 0x7a, - 0x3e, 0x33, 0x62, 0x88, 0x1f, 0xc4, 0xbc, 0x2b, 0xc0, 0x18, 0xe2, 0xc9, 0xdb, 0x90, 0xf3, 0x0d, - 0xdf, 0x96, 0xc7, 0xcb, 0x1f, 0xa3, 0x79, 0xa1, 0x54, 0xd9, 0x90, 0xe6, 0xc1, 0x8f, 0x5a, 0xb3, - 0xdf, 0xc8, 0x59, 0xf6, 0x16, 0x76, 0xfe, 0x2d, 0x07, 0x53, 0x89, 0x3d, 0xd4, 0x47, 0x39, 0xaf, - 0xf2, 0xc5, 0xa1, 0x33, 0x7c, 0xf1, 0x25, 0xc8, 0x9b, 0xb6, 0x45, 0x9d, 0x60, 0xad, 0x26, 0x7d, - 0x36, 0x6a, 0xbc, 0x16, 0xf0, 0x65, 0x54, 0x14, 0x4f, 0xda, 0x73, 0x75, 0x17, 0x1b, 0x3e, 0xef, - 0x61, 0xa8, 0x91, 0x41, 0x5e, 0x1d, 0x31, 0xda, 0x97, 0x54, 0x96, 0xf8, 0xb0, 0x17, 0x95, 0xca, - 0x7e, 0x3a, 0x04, 0xf9, 0xad, 0xd2, 0x6e, 0xa5, 0xd4, 0x0e, 0x1a, 0xe4, 0x1d, 0x18, 0xae, 0x1a, - 0xbe, 0x65, 0xca, 0x29, 0x6e, 0x2f, 0x77, 0x0c, 0x14, 0x98, 0x95, 0xf1, 0x9f, 0x28, 0x78, 0x92, - 0x5b, 0xcc, 0x14, 0x59, 0x71, 0xd5, 0xd5, 0x99, 0xee, 0x82, 0xb0, 0x56, 0x56, 0x56, 0x89, 0xe1, - 0x64, 0x09, 0x72, 0xce, 0x41, 0xb7, 0x87, 0xb5, 0xb9, 0x1f, 0x6e, 0xad, 0xd3, 0x63, 0xe4, 0x83, - 0xc9, 0x1e, 0x80, 0xe9, 0xd1, 0x1a, 0x75, 0x02, 0x4b, 0x5e, 0x18, 0xd3, 0xdd, 0x02, 0xe0, 0x92, - 0x1a, 0x8c, 0x1a, 0xa3, 0xf9, 0xbf, 0x18, 0x86, 0xe9, 0x64, 0x6f, 0xc1, 0xa3, 0x5c, 0xf4, 0x06, - 0x8c, 0xfa, 0x6d, 0x7e, 0x3c, 0x4a, 0x3a, 0xa9, 0x0a, 0x4c, 0x15, 0x01, 0xc6, 0x10, 0x9f, 0xee, - 0x7a, 0xd9, 0x27, 0xe2, 0x7a, 0xb9, 0xf3, 0xba, 0x5e, 0xbf, 0x53, 0xec, 0x27, 0x9d, 0x87, 0xa4, - 0xdf, 0xeb, 0x73, 0x37, 0x48, 0x17, 0x49, 0x93, 0xca, 0x4b, 0x39, 0x46, 0xfb, 0x72, 0x42, 0x29, - 0x74, 0xc4, 0xfe, 0xde, 
0xd0, 0xf1, 0x1f, 0x39, 0x98, 0x8c, 0x6f, 0xdb, 0xb1, 0xe2, 0xa5, 0xe1, - 0xfa, 0x81, 0x2c, 0xe9, 0x92, 0xf7, 0x37, 0xdd, 0x8e, 0x50, 0xa8, 0xd3, 0x9d, 0x2f, 0x9b, 0xdc, - 0x80, 0x51, 0x79, 0xba, 0x57, 0x26, 0x13, 0x65, 0xcf, 0xf2, 0x04, 0x30, 0x86, 0xf8, 0xff, 0x4f, - 0x25, 0xb6, 0x4f, 0xbe, 0xdb, 0x99, 0x4a, 0xde, 0xe9, 0xeb, 0x1e, 0xed, 0x45, 0x65, 0x92, 0xb7, - 0x61, 0xa6, 0x63, 0x65, 0x92, 0x59, 0x8c, 0xd8, 0x2c, 0xc8, 0xc4, 0x2d, 0x26, 0xb6, 0x45, 0x30, - 0x07, 0xc3, 0xac, 0x36, 0x16, 0x27, 0x15, 0x0b, 0x22, 0xe4, 0xb3, 0xfa, 0xc8, 0x47, 0x01, 0x9f, - 0xff, 0xd5, 0x30, 0xcc, 0x74, 0x74, 0x05, 0xc5, 0x17, 0x02, 0x33, 0xe7, 0x58, 0x08, 0x7c, 0x03, - 0x26, 0xb9, 0x89, 0x6e, 0x27, 0x96, 0x0f, 0x55, 0xcf, 0xf8, 0x6e, 0x0c, 0x8b, 0x09, 0xea, 0xf3, - 0x15, 0x36, 0x6f, 0xc0, 0xa4, 0xdf, 0xae, 0xfa, 0xa6, 0x67, 0xb5, 0x98, 0xad, 0xad, 0x2d, 0xcb, - 0x3d, 0x0f, 0x25, 0xa4, 0x12, 0xc3, 0x62, 0x82, 0x9a, 0xd4, 0xf9, 0xe9, 0x7f, 0x99, 0x50, 0xe4, - 0xf2, 0x45, 0x57, 0x87, 0xd8, 0x2f, 0xcb, 0x0b, 0x02, 0x62, 0x2c, 0xb0, 0x83, 0x29, 0xa9, 0xc2, - 0xac, 0x58, 0xd0, 0xd3, 0x15, 0x52, 0xcb, 0x81, 0xa2, 0x7a, 0x99, 0x97, 0x4a, 0xcf, 0x2e, 0x9f, - 0x4a, 0x89, 0x67, 0x70, 0xe9, 0xf2, 0xe4, 0xfa, 0x3a, 0x4c, 0x45, 0x5a, 0xfa, 0xb7, 0x2c, 0x3b, - 0x5c, 0x56, 0xf9, 0x35, 0x39, 0xe8, 0xb9, 0x68, 0xbf, 0x68, 0x29, 0x4e, 0x88, 0xc9, 0x91, 0x83, - 0x58, 0x99, 0xec, 0x30, 0xc1, 0x8b, 0x72, 0xad, 0xff, 0x1c, 0x61, 0xf6, 0x9f, 0xe8, 0x76, 0x20, - 0xf3, 0x30, 0xc2, 0x4d, 0x8e, 0xc5, 0x6f, 0xb5, 0xae, 0xcd, 0x6d, 0xd1, 0x47, 0x89, 0x39, 0xc7, - 0x5a, 0xa1, 0x9c, 0x9d, 0x64, 0x4f, 0x99, 0x9d, 0xb4, 0xe0, 0x52, 0x60, 0xfb, 0xbb, 0x5e, 0xdb, - 0x0f, 0x96, 0xa8, 0x17, 0xf8, 0xd2, 0x22, 0x73, 0x5d, 0xdf, 0xcb, 0xb3, 0xbb, 0x51, 0x49, 0x72, - 0xc1, 0x34, 0xd6, 0xcc, 0x2e, 0x03, 0xdb, 0x2f, 0xd9, 0xb6, 0x7b, 0x3f, 0xdc, 0x0d, 0x8b, 0xa2, - 0xb9, 0x8c, 0xd3, 0xca, 0x2e, 0x77, 0x37, 0x2a, 0xa7, 0x50, 0xe2, 0x19, 0x5c, 0xc8, 0x26, 0x7f, - 0xaa, 0xbb, 0x86, 0x6d, 0xd5, 0x8c, 0x80, 0xb2, 0x7c, 0xc7, 0x17, 0xf1, 0x84, 0xd1, 0x7f, 0x45, - 0x32, 0x67, 0x2a, 0x27, 0x49, 0x30, 0x6d, 0xdc, 0xa0, 0x2e, 0xa8, 0x4b, 0x4d, 0x8f, 0xf9, 0x27, - 0x92, 0x1e, 0x0b, 0x8f, 0x74, 0xde, 0x98, 0xbf, 0x41, 0x9f, 0xfc, 0x2d, 0x61, 0xf2, 0x17, 0xe5, - 0x6f, 0xff, 0x94, 0x83, 0xe9, 0x64, 0xcb, 0xd5, 0xe3, 0xce, 0x99, 0xf4, 0xcb, 0x48, 0x86, 0xfa, - 0x71, 0x19, 0xc9, 0x22, 0x14, 0x78, 0x56, 0x6c, 0x19, 0x66, 0x78, 0xc7, 0x8a, 0x4a, 0x7b, 0x5b, - 0x21, 0x02, 0x23, 0x1a, 0x32, 0x0b, 0x43, 0xb5, 0xaa, 0x3c, 0x62, 0xae, 0x5a, 0x08, 0x96, 0xcb, - 0x38, 0x54, 0xab, 0x92, 0xeb, 0x90, 0x97, 0x93, 0xb1, 0x70, 0x87, 0x9d, 0x8b, 0x95, 0x33, 0x35, - 0x1f, 0x15, 0x76, 0x50, 0xd3, 0x9f, 0x01, 0xac, 0x81, 0x25, 0xbf, 0xdc, 0x85, 0xb5, 0xe7, 0xe4, - 0xe0, 0x52, 0xca, 0x91, 0x88, 0xf8, 0x07, 0xcb, 0x9c, 0xe3, 0x83, 0x1d, 0xc2, 0xc8, 0xbe, 0x65, - 0x07, 0xd4, 0xeb, 0x53, 0x5b, 0x47, 0xa8, 0xd4, 0x2d, 0xce, 0x54, 0xe4, 0x09, 0xf1, 0x37, 0x4a, - 0x41, 0xcc, 0x7b, 0x2f, 0xf3, 0x75, 0xe9, 0x70, 0x31, 0x2c, 0x3c, 0xdd, 0x9e, 0x95, 0xdf, 0xfb, - 0x5c, 0xf7, 0x52, 0xac, 0xa6, 0x70, 0x88, 0x16, 0xeb, 0xd2, 0xb0, 0x98, 0x2a, 0x95, 0x2c, 0x01, - 0xa8, 0xfe, 0xb3, 0x70, 0x13, 0xea, 0x45, 0x56, 0x77, 0xab, 0x06, 0x35, 0xff, 0x97, 0x7c, 0xcd, - 0x5b, 0x7b, 0xdb, 0x3c, 0xa7, 0x69, 0xc3, 0xe2, 0xd7, 0x69, 0x0d, 0xf7, 0xe5, 0x3a, 0xad, 0x94, - 0xcf, 0x7b, 0x51, 0xd6, 0xf5, 0x8f, 0x59, 0x98, 0x8c, 0x7f, 0x48, 0x72, 0x0d, 0x46, 0x5a, 0x1e, - 0xdd, 0xb7, 0x1e, 0x24, 0xef, 0x72, 0xda, 0xe6, 0x50, 0x94, 0x58, 0xe2, 0xc2, 0x88, 0x6d, 0x54, - 0x99, 0x8b, 0x8b, 0xab, 0x40, 0x56, 0x7b, 0xbe, 
0xd6, 0x22, 0x5c, 0x0a, 0x09, 0x05, 0x6e, 0x70, - 0xf6, 0x28, 0xc5, 0x30, 0x81, 0xfb, 0x16, 0xb5, 0x6b, 0x62, 0xc7, 0x7d, 0x10, 0x02, 0x6f, 0x71, - 0xf6, 0x28, 0xc5, 0x90, 0x77, 0xa0, 0x20, 0x2e, 0xc0, 0xaa, 0x95, 0x8f, 0xe5, 0xdc, 0xe4, 0x37, - 0xce, 0x67, 0xb2, 0xbb, 0x56, 0x93, 0x46, 0xee, 0xb8, 0x14, 0x32, 0xc1, 0x88, 0x1f, 0xbf, 0x67, - 0x71, 0x3f, 0xa0, 0x5e, 0x25, 0x30, 0xbc, 0xf0, 0x1a, 0xc4, 0xe8, 0x9e, 0x45, 0x85, 0x41, 0x8d, - 0x6a, 0xfe, 0x5f, 0x87, 0x61, 0x32, 0x7e, 0xb4, 0xe3, 0x09, 0x75, 0x4b, 0xbc, 0x04, 0x79, 0x3e, - 0x15, 0x2c, 0x79, 0x4e, 0xf2, 0xea, 0xbc, 0x5d, 0x09, 0x47, 0x45, 0x41, 0x10, 0x0a, 0xc6, 0xe3, - 0xdd, 0x86, 0x28, 0xb6, 0x88, 0xd5, 0x3d, 0x88, 0x11, 0x1b, 0xc6, 0xd3, 0x0f, 0xc9, 0xbb, 0x9b, - 0x37, 0x72, 0x9e, 0x0a, 0x8c, 0x11, 0x1b, 0x66, 0xf9, 0x1e, 0xad, 0x87, 0xf3, 0x41, 0xcd, 0xf2, - 0x91, 0x43, 0x51, 0x62, 0xc9, 0x0d, 0x18, 0xf5, 0x5c, 0x9b, 0x96, 0x70, 0x4b, 0x76, 0x48, 0xa8, - 0xb5, 0x08, 0x14, 0x60, 0x0c, 0xf1, 0x83, 0xa8, 0xc3, 0xe3, 0x06, 0xd0, 0xc5, 0xaa, 0xd2, 0x2a, - 0xcc, 0x1c, 0xc9, 0x39, 0x66, 0xc5, 0xaa, 0x3b, 0x46, 0x10, 0x75, 0x8a, 0xa9, 0xfd, 0xbe, 0xbb, - 0x49, 0x02, 0xec, 0x1c, 0xd3, 0x5b, 0xc4, 0xf9, 0x07, 0x66, 0xc3, 0xb1, 0x63, 0x41, 0x71, 0xfb, - 0xc8, 0x0c, 0xc0, 0x3e, 0x86, 0xfa, 0x6d, 0x1f, 0xd9, 0x33, 0xed, 0xe3, 0x45, 0x18, 0xe6, 0x37, - 0xfb, 0xca, 0x1a, 0x5d, 0x55, 0xf4, 0xfc, 0xea, 0x56, 0x14, 0x38, 0x52, 0x82, 0xa9, 0xfb, 0x86, - 0x15, 0xb0, 0x48, 0x21, 0xf6, 0x92, 0xc4, 0xa2, 0x66, 0x56, 0x6f, 0xd9, 0x88, 0xa1, 0x31, 0x49, - 0xdf, 0x8d, 0x1d, 0x76, 0x57, 0x32, 0xbf, 0x01, 0x93, 0x5c, 0xc9, 0x92, 0x69, 0xba, 0x6d, 0xbe, - 0x81, 0x93, 0x8f, 0xaf, 0x36, 0xec, 0xe8, 0xd8, 0x65, 0x4c, 0x50, 0xc7, 0xad, 0xbe, 0x3f, 0xb7, - 0xc7, 0xc4, 0x4d, 0xe6, 0xa2, 0xd2, 0xe3, 0x77, 0x20, 0x1f, 0xda, 0x05, 0x2b, 0x69, 0xd5, 0xb8, - 0xa8, 0xa4, 0x65, 0x26, 0xc2, 0x99, 0x2c, 0x42, 0xc1, 0x6d, 0xd1, 0xd8, 0x75, 0x8e, 0x2a, 0x01, - 0xdc, 0x09, 0x11, 0x18, 0xd1, 0x30, 0x2b, 0x11, 0x52, 0x13, 0xeb, 0x3e, 0x77, 0x19, 0x50, 0x2a, - 0x31, 0xff, 0x71, 0x06, 0xc2, 0x2b, 0x9e, 0xc8, 0x32, 0x0c, 0xb7, 0x5c, 0x2f, 0x10, 0x85, 0xf9, - 0xd8, 0xcd, 0xb9, 0x74, 0x73, 0x16, 0x2d, 0x12, 0xae, 0x17, 0x44, 0x1c, 0xd9, 0x2f, 0x1f, 0xc5, - 0x60, 0xa6, 0xa7, 0x69, 0xb7, 0xfd, 0x80, 0x7a, 0x6b, 0xdb, 0x49, 0x3d, 0x97, 0x42, 0x04, 0x46, - 0x34, 0xf3, 0xff, 0x9b, 0x85, 0xe9, 0xe4, 0x49, 0x28, 0xf2, 0x3e, 0x4c, 0xf8, 0x56, 0xdd, 0xb1, - 0x9c, 0xba, 0x2c, 0xdd, 0x33, 0x5d, 0x37, 0xb3, 0x56, 0xf4, 0xf1, 0x18, 0x67, 0xd7, 0xb7, 0x6d, - 0x1d, 0x2d, 0x3d, 0x66, 0x2f, 0x2e, 0x3d, 0x7e, 0xd2, 0x79, 0x38, 0xe0, 0xbd, 0x3e, 0x9f, 0x45, - 0xbb, 0x28, 0x0f, 0xf8, 0xef, 0x61, 0x78, 0x26, 0xfd, 0xd4, 0xd9, 0x13, 0x9a, 0x7a, 0x44, 0x7d, - 0x97, 0x43, 0xa7, 0xf6, 0x5d, 0x06, 0xaa, 0xd4, 0xc9, 0xf6, 0xe9, 0x14, 0x99, 0x7a, 0x01, 0x67, - 0x54, 0x3b, 0xfa, 0xa4, 0x28, 0xf7, 0xc8, 0x49, 0xd1, 0x35, 0x18, 0xa9, 0xb6, 0xcd, 0x03, 0xb9, - 0x0e, 0xab, 0x5f, 0x99, 0xca, 0xa1, 0x28, 0xb1, 0x5a, 0xd2, 0x19, 0x39, 0x33, 0xe9, 0xb0, 0x24, - 0xda, 0x0e, 0x1a, 0xa2, 0xd3, 0x74, 0xb4, 0xfb, 0x24, 0x1a, 0x8e, 0xc5, 0x88, 0x0d, 0x6f, 0x12, - 0x6f, 0x59, 0x7b, 0xb8, 0x21, 0xe3, 0x7f, 0xd4, 0x24, 0xbe, 0xbd, 0xb6, 0x87, 0x1b, 0x28, 0xb1, - 0xe4, 0xb3, 0xce, 0x78, 0x6f, 0x0e, 0xe4, 0xa4, 0xe3, 0x45, 0x59, 0xbd, 0x09, 0x33, 0x1d, 0xdf, - 0xfc, 0xdc, 0x85, 0xd1, 0x35, 0x18, 0xf1, 0xdb, 0xfb, 0x8c, 0x2e, 0x71, 0x34, 0xa6, 0xc2, 0xa1, - 0x28, 0xb1, 0xf3, 0x3f, 0xc8, 0x31, 0x29, 0x89, 0xf3, 0x89, 0x4f, 0xc8, 0xab, 0x5e, 0x87, 0x09, - 0x51, 0x9a, 0xdc, 0xd3, 0x8e, 0x7e, 0xe4, 0xb5, 0xde, 0x4e, 0x1d, 0x89, 
0x71, 0x5a, 0xb2, 0xc6, - 0xcd, 0xa4, 0xeb, 0xc9, 0x3d, 0x48, 0x4b, 0x62, 0x29, 0x54, 0x32, 0x20, 0xaf, 0xc0, 0x18, 0x7f, - 0x08, 0xf1, 0xca, 0x65, 0x8d, 0xce, 0x5b, 0xa1, 0x57, 0x22, 0x30, 0xea, 0x34, 0xf1, 0x25, 0xc2, - 0xe1, 0xbe, 0x2c, 0x11, 0x76, 0x7c, 0x95, 0x8b, 0xb2, 0xbb, 0xef, 0xe7, 0x41, 0x5d, 0x21, 0x49, - 0xcc, 0x8e, 0x8b, 0x3c, 0x7f, 0xaf, 0xeb, 0x05, 0xb2, 0x50, 0x15, 0xb1, 0x00, 0x97, 0x52, 0x14, - 0xbc, 0x09, 0x44, 0xde, 0x1c, 0x29, 0xa7, 0x6f, 0xda, 0x7f, 0x30, 0xa3, 0xda, 0xb6, 0x2b, 0x1d, - 0x14, 0x98, 0x32, 0x8a, 0xbc, 0xc9, 0x6f, 0x9b, 0x0d, 0x0c, 0xcb, 0x51, 0x91, 0xf7, 0x85, 0x53, - 0xda, 0x49, 0x05, 0x91, 0xba, 0x37, 0x56, 0xfc, 0xc4, 0x68, 0x38, 0x59, 0x81, 0xd1, 0x23, 0xd7, - 0x6e, 0x37, 0xe5, 0x42, 0xcd, 0xd8, 0xcd, 0xd9, 0x34, 0x4e, 0x77, 0x39, 0x89, 0xd6, 0x74, 0x25, - 0x86, 0x60, 0x38, 0x96, 0x50, 0x98, 0xe2, 0xcb, 0xf4, 0x56, 0x70, 0x2c, 0x1d, 0x40, 0x6e, 0x7b, - 0x5d, 0x4b, 0x63, 0xb7, 0xed, 0xd6, 0x2a, 0x71, 0x6a, 0x79, 0x1f, 0x7f, 0x1c, 0x88, 0x49, 0x9e, - 0xe4, 0x16, 0xe4, 0x8d, 0xfd, 0x7d, 0xcb, 0xb1, 0x82, 0x63, 0xb9, 0x96, 0xf9, 0x7c, 0x1a, 0xff, - 0x92, 0xa4, 0x91, 0x67, 0x84, 0xe4, 0x2f, 0x54, 0x63, 0xc9, 0x1e, 0x8c, 0x05, 0xae, 0x2d, 0x67, - 0x88, 0xbe, 0x2c, 0x18, 0xaf, 0xa4, 0xb1, 0xda, 0x55, 0x64, 0xd1, 0x6a, 0x71, 0x04, 0xf3, 0x51, - 0xe7, 0x43, 0xfe, 0x32, 0x03, 0xe3, 0x8e, 0x5b, 0xa3, 0xa1, 0xeb, 0xc9, 0xdb, 0x25, 0xdf, 0xee, - 0xd3, 0xd5, 0xa7, 0x0b, 0x5b, 0x1a, 0x6f, 0xe1, 0x21, 0xea, 0xec, 0x88, 0x8e, 0xc2, 0x98, 0x12, - 0xc4, 0x81, 0x69, 0xab, 0x69, 0xd4, 0xe9, 0x76, 0xdb, 0x96, 0xbb, 0x87, 0xbe, 0x4c, 0x1e, 0xa9, - 0x4d, 0xc8, 0xfc, 0x7f, 0x59, 0x12, 0x37, 0xfe, 0x22, 0xdd, 0xa7, 0x1e, 0xbf, 0x78, 0x58, 0xdd, - 0x5d, 0xbe, 0x96, 0xe0, 0x84, 0x1d, 0xbc, 0x59, 0xfd, 0xdb, 0xf2, 0x2c, 0x97, 0x7f, 0x37, 0xdb, - 0xf0, 0xc5, 0xd5, 0xb1, 0x10, 0xef, 0x77, 0xdd, 0x4e, 0x12, 0x60, 0xe7, 0x18, 0x72, 0x1d, 0xf2, - 0x21, 0x90, 0x77, 0xf3, 0xc9, 0x1b, 0x8d, 0xc2, 0xb1, 0xa8, 0xb0, 0xb3, 0xdf, 0x80, 0x99, 0x8e, - 0x77, 0xd3, 0x55, 0x40, 0xf8, 0xdb, 0x0c, 0x24, 0x5b, 0xf7, 0xd9, 0x0c, 0xbe, 0x66, 0x79, 0x9c, - 0xe1, 0x71, 0x72, 0xe5, 0x77, 0x39, 0x44, 0x60, 0x44, 0x43, 0xae, 0x42, 0xae, 0x65, 0x04, 0x8d, - 0xe4, 0x76, 0x1d, 0x63, 0x89, 0x1c, 0x43, 0x6e, 0x02, 0xb0, 0x7f, 0x91, 0xd6, 0xe9, 0x83, 0x96, - 0x2c, 0x48, 0xd4, 0x62, 0xd4, 0xb6, 0xc2, 0xa0, 0x46, 0x35, 0xff, 0x93, 0x11, 0x98, 0x8c, 0xe7, - 0x16, 0x36, 0x03, 0xa2, 0x4e, 0xad, 0xe5, 0x5a, 0x4e, 0x90, 0xfc, 0xff, 0x17, 0x56, 0x24, 0x1c, - 0x15, 0x05, 0xcb, 0x93, 0x4d, 0x1a, 0x34, 0xdc, 0x5a, 0x32, 0x4f, 0x6e, 0x72, 0x28, 0x4a, 0x2c, - 0x57, 0xdf, 0xf5, 0x02, 0xa9, 0x56, 0xa4, 0xbe, 0xeb, 0x05, 0xc8, 0x31, 0xe1, 0x6e, 0x63, 0xee, - 0x94, 0xdd, 0xc6, 0x3a, 0x4c, 0xb3, 0x68, 0x45, 0xbd, 0x25, 0xea, 0x05, 0x8f, 0xbd, 0xf9, 0x5d, - 0x49, 0xb0, 0xc0, 0x0e, 0xa6, 0xfc, 0x3f, 0xff, 0xe0, 0x30, 0x3e, 0xf8, 0x31, 0x4f, 0x22, 0x54, - 0xe2, 0x1c, 0x30, 0xc9, 0x72, 0x10, 0x6b, 0x4a, 0xf1, 0xef, 0xf8, 0xd8, 0x47, 0x5e, 0xf3, 0x7d, - 0x3a, 0xf2, 0x4a, 0x6e, 0xc3, 0x64, 0xf4, 0x72, 0x99, 0xfd, 0xc9, 0x53, 0x0d, 0x57, 0xa5, 0x2a, - 0xc5, 0x68, 0xcb, 0xbe, 0x12, 0xa3, 0xc3, 0xc4, 0x38, 0xb2, 0x02, 0x13, 0xea, 0xfd, 0x71, 0x46, - 0x10, 0x3f, 0x6f, 0x90, 0x64, 0x24, 0xc9, 0x30, 0x3e, 0xaa, 0xa7, 0xac, 0x5e, 0x5e, 0xf8, 0xfc, - 0xcb, 0x2b, 0x4f, 0x7d, 0xf1, 0xe5, 0x95, 0xa7, 0x7e, 0xf6, 0xe5, 0x95, 0xa7, 0x3e, 0x3e, 0xb9, - 0x92, 0xf9, 0xfc, 0xe4, 0x4a, 0xe6, 0x8b, 0x93, 0x2b, 0x99, 0x9f, 0x9d, 0x5c, 0xc9, 0xfc, 0xe2, - 0xe4, 0x4a, 0xe6, 0x07, 0xff, 0x7e, 0xe5, 0xa9, 0x6f, 0xe6, 0xc3, 0xaf, 0xf1, 0x7f, 0x01, 0x00, - 
0x00, 0xff, 0xff, 0x8e, 0xe4, 0x71, 0x50, 0xdf, 0x70, 0x00, 0x00, + // 7427 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x5d, 0x6c, 0x24, 0xc7, + 0x71, 0xb0, 0x96, 0xdc, 0x5d, 0xee, 0xf6, 0xf2, 0xb7, 0xef, 0x74, 0x5a, 0xd1, 0xba, 0xe3, 0x7d, + 0x14, 0x7c, 0x90, 0xbe, 0x48, 0x64, 0x74, 0x89, 0x63, 0x59, 0x8a, 0x65, 0xec, 0x92, 0xc7, 0x3b, + 0xea, 0x48, 0xde, 0xb2, 0x86, 0x27, 0x9d, 0x2c, 0x4b, 0xf2, 0x70, 0xb6, 0xb9, 0x1c, 0x73, 0x76, + 0x66, 0x39, 0x33, 0x7b, 0x77, 0x3c, 0x20, 0xb6, 0x61, 0xc0, 0x4e, 0xac, 0x1f, 0xdb, 0x4a, 0xe2, + 0x24, 0x48, 0xe0, 0x20, 0x4e, 0x02, 0x07, 0x41, 0x82, 0xbc, 0xc5, 0xc8, 0x6b, 0x90, 0x17, 0x23, + 0xc9, 0x83, 0x93, 0x27, 0x27, 0x06, 0x0e, 0xf6, 0x05, 0x7e, 0xcb, 0x4b, 0xe0, 0xa7, 0xe4, 0x29, + 0xe8, 0x9f, 0xe9, 0xe9, 0xf9, 0x59, 0x1e, 0x97, 0x3b, 0x4b, 0x9e, 0x8c, 0x3c, 0x91, 0xdb, 0x55, + 0x5d, 0x55, 0xd3, 0x53, 0x55, 0x5d, 0x5d, 0xdd, 0x5d, 0x83, 0xd6, 0x5b, 0xa6, 0xbf, 0xdb, 0xdd, + 0x5e, 0x30, 0x9c, 0xf6, 0xa2, 0xee, 0xb6, 0x9c, 0x8e, 0xeb, 0x7c, 0x81, 0xfd, 0xf3, 0x3c, 0xb9, + 0x4d, 0x6c, 0xdf, 0x5b, 0xec, 0xec, 0xb5, 0x16, 0xf5, 0x8e, 0xe9, 0x2d, 0xf2, 0xdf, 0x4e, 0xd7, + 0x35, 0xc8, 0xe2, 0xed, 0x17, 0x74, 0xab, 0xb3, 0xab, 0xbf, 0xb0, 0xd8, 0x22, 0x36, 0x71, 0x75, + 0x9f, 0x34, 0x17, 0x3a, 0xae, 0xe3, 0x3b, 0xf8, 0xd3, 0x21, 0xb9, 0x85, 0x80, 0x1c, 0xfb, 0xe7, + 0x1d, 0xde, 0x7d, 0xa1, 0xb3, 0xd7, 0x5a, 0xa0, 0xe4, 0x16, 0x14, 0x72, 0x0b, 0x01, 0xb9, 0xd9, + 0xcf, 0x1c, 0x59, 0x1a, 0xc3, 0x69, 0xb7, 0x1d, 0x3b, 0xce, 0x7f, 0xf6, 0x79, 0x85, 0x40, 0xcb, + 0x69, 0x39, 0x8b, 0xac, 0x79, 0xbb, 0xbb, 0xc3, 0x7e, 0xb1, 0x1f, 0xec, 0x3f, 0x81, 0x3e, 0xbf, + 0xf7, 0xa2, 0xb7, 0x60, 0x3a, 0x94, 0xe4, 0xa2, 0xe1, 0xb8, 0xf4, 0xc1, 0x12, 0x24, 0x7f, 0x35, + 0xc4, 0x69, 0xeb, 0xc6, 0xae, 0x69, 0x13, 0xf7, 0x20, 0x94, 0xa3, 0x4d, 0x7c, 0x3d, 0xad, 0xd7, + 0x62, 0xaf, 0x5e, 0x6e, 0xd7, 0xf6, 0xcd, 0x36, 0x49, 0x74, 0xf8, 0xb5, 0x87, 0x75, 0xf0, 0x8c, + 0x5d, 0xd2, 0xd6, 0xe3, 0xfd, 0xe6, 0xff, 0x3b, 0x87, 0x66, 0x6a, 0xeb, 0x9b, 0x8d, 0x25, 0xc7, + 0xf6, 0xba, 0x6d, 0xb2, 0xe4, 0xd8, 0x3b, 0x66, 0x0b, 0x7f, 0x02, 0x55, 0x0c, 0xde, 0xe0, 0x6e, + 0xe9, 0xad, 0x6a, 0xee, 0x62, 0xee, 0x99, 0x72, 0xfd, 0xcc, 0x0f, 0xee, 0xcf, 0x3d, 0xf6, 0xe0, + 0xfe, 0x5c, 0x65, 0x29, 0x04, 0x81, 0x8a, 0x87, 0x9f, 0x45, 0x63, 0x7a, 0xd7, 0x77, 0x6a, 0xc6, + 0x5e, 0x75, 0xe4, 0x62, 0xee, 0x99, 0x52, 0x7d, 0x4a, 0x74, 0x19, 0xab, 0xf1, 0x66, 0x08, 0xe0, + 0x78, 0x11, 0x95, 0xc9, 0x5d, 0xc3, 0xea, 0x7a, 0xe6, 0x6d, 0x52, 0x1d, 0x65, 0xc8, 0x33, 0x02, + 0xb9, 0x7c, 0x25, 0x00, 0x40, 0x88, 0x43, 0x69, 0xdb, 0xce, 0x9a, 0x63, 0xe8, 0x56, 0x35, 0x1f, + 0xa5, 0xbd, 0xc1, 0x9b, 0x21, 0x80, 0xe3, 0x4b, 0xa8, 0x68, 0x3b, 0xaf, 0xeb, 0xa6, 0x5f, 0x2d, + 0x30, 0xcc, 0x49, 0x81, 0x59, 0xdc, 0x60, 0xad, 0x20, 0xa0, 0xf3, 0xff, 0x59, 0x41, 0x53, 0xf4, + 0xd9, 0xaf, 0x50, 0xe5, 0xd0, 0x98, 0x2e, 0xe1, 0xf3, 0x68, 0xb4, 0xeb, 0x5a, 0xe2, 0x89, 0x2b, + 0xa2, 0xe3, 0xe8, 0x4d, 0x58, 0x03, 0xda, 0x8e, 0x5f, 0x44, 0xe3, 0xe4, 0xae, 0xb1, 0xab, 0xdb, + 0x2d, 0xb2, 0xa1, 0xb7, 0x09, 0x7b, 0xcc, 0x72, 0xfd, 0xac, 0xc0, 0x1b, 0xbf, 0xa2, 0xc0, 0x20, + 0x82, 0xa9, 0xf6, 0xdc, 0x3a, 0xe8, 0xf0, 0x67, 0x4e, 0xe9, 0x49, 0x61, 0x10, 0xc1, 0xc4, 0x97, + 0x11, 0x72, 0x9d, 0xae, 0x6f, 0xda, 0xad, 0xeb, 0xe4, 0x80, 0x3d, 0x7c, 0xb9, 0x8e, 0x45, 0x3f, + 0x04, 0x12, 0x02, 0x0a, 0x16, 0xfe, 0x0d, 0x34, 0x63, 0x38, 0xb6, 0x4d, 0x0c, 0xdf, 0x74, 0xec, + 0xba, 0x6e, 0xec, 0x39, 0x3b, 0x3b, 0x6c, 0x34, 0x2a, 0x97, 0x5f, 0x5c, 0x38, 0xb2, 0x91, 0x71, + 0x2b, 
0x59, 0x10, 0xfd, 0xeb, 0x8f, 0x3f, 0xb8, 0x3f, 0x37, 0xb3, 0x14, 0x27, 0x0b, 0x49, 0x4e, + 0xf8, 0x39, 0x54, 0xfa, 0x82, 0xe7, 0xd8, 0x75, 0xa7, 0x79, 0x50, 0x2d, 0xb2, 0x77, 0x30, 0x2d, + 0x04, 0x2e, 0xbd, 0xaa, 0xdd, 0xd8, 0xa0, 0xed, 0x20, 0x31, 0xf0, 0x4d, 0x34, 0xea, 0x5b, 0x5e, + 0x75, 0x8c, 0x89, 0xf7, 0x52, 0xdf, 0xe2, 0x6d, 0xad, 0x69, 0x5c, 0x6d, 0xeb, 0x63, 0xf4, 0x5d, + 0x6d, 0xad, 0x69, 0x40, 0xe9, 0xe1, 0x77, 0x73, 0xa8, 0x44, 0xed, 0xab, 0xa9, 0xfb, 0x7a, 0xb5, + 0x74, 0x71, 0xf4, 0x99, 0xca, 0xe5, 0xcf, 0x2d, 0x0c, 0xe4, 0x60, 0x16, 0x62, 0xda, 0xb2, 0xb0, + 0x2e, 0xc8, 0x5f, 0xb1, 0x7d, 0xf7, 0x20, 0x7c, 0xc6, 0xa0, 0x19, 0x24, 0x7f, 0xfc, 0xfb, 0x39, + 0x34, 0x15, 0xbc, 0xd5, 0x65, 0x62, 0x58, 0xba, 0x4b, 0xaa, 0x65, 0xf6, 0xc0, 0xb7, 0xb2, 0x90, + 0x29, 0x4a, 0x59, 0x0c, 0xc7, 0x99, 0x07, 0xf7, 0xe7, 0xa6, 0x62, 0x20, 0x88, 0x4b, 0x81, 0xdf, + 0xcb, 0xa1, 0xf1, 0xfd, 0x2e, 0xe9, 0x4a, 0xb1, 0x10, 0x13, 0xeb, 0x66, 0x06, 0x62, 0x6d, 0x2a, + 0x64, 0x85, 0x4c, 0xd3, 0x54, 0xd9, 0xd5, 0x76, 0x88, 0x30, 0xc7, 0x5f, 0x42, 0x65, 0xf6, 0xbb, + 0x6e, 0xda, 0xcd, 0x6a, 0x85, 0x49, 0x02, 0x59, 0x49, 0x42, 0x69, 0x0a, 0x31, 0x26, 0xa8, 0x9f, + 0x91, 0x8d, 0x10, 0xf2, 0xc4, 0x77, 0xd0, 0x98, 0x70, 0x69, 0xd5, 0x71, 0xc6, 0xbe, 0x91, 0x01, + 0xfb, 0x88, 0x77, 0xad, 0x57, 0xa8, 0xd7, 0x12, 0x4d, 0x10, 0x70, 0xc3, 0xb7, 0x50, 0x5e, 0xef, + 0xfa, 0xbb, 0xd5, 0x89, 0x63, 0x9a, 0x41, 0x5d, 0xf7, 0x4c, 0xa3, 0xd6, 0xf5, 0x77, 0xeb, 0xa5, + 0x07, 0xf7, 0xe7, 0xf2, 0xf4, 0x3f, 0x60, 0x14, 0x31, 0xa0, 0x72, 0xd7, 0xb5, 0x34, 0x62, 0xb8, + 0xc4, 0xaf, 0x4e, 0x32, 0xf2, 0x1f, 0x5f, 0xe0, 0xf3, 0x05, 0xa5, 0xb0, 0x40, 0xa7, 0xae, 0x85, + 0xdb, 0x2f, 0x2c, 0x70, 0x8c, 0xeb, 0xe4, 0x40, 0x23, 0x16, 0x31, 0x7c, 0xc7, 0xe5, 0xc3, 0x74, + 0x13, 0xd6, 0x38, 0x04, 0x42, 0x32, 0xd8, 0x47, 0xc5, 0x1d, 0xd3, 0xf2, 0x89, 0x5b, 0x9d, 0xca, + 0x64, 0x94, 0x14, 0xab, 0x5a, 0x61, 0x74, 0xeb, 0x88, 0x7a, 0x6c, 0xfe, 0x3f, 0x08, 0x5e, 0xb3, + 0x2f, 0xa3, 0x89, 0x88, 0xc9, 0xe1, 0x69, 0x34, 0xba, 0x47, 0x0e, 0xb8, 0xbb, 0x06, 0xfa, 0x2f, + 0x3e, 0x8b, 0x0a, 0xb7, 0x75, 0xab, 0x2b, 0x5c, 0x33, 0xf0, 0x1f, 0x2f, 0x8d, 0xbc, 0x98, 0x9b, + 0xff, 0x61, 0x0e, 0x3d, 0xd9, 0xd3, 0x58, 0xe8, 0xfc, 0xd2, 0xec, 0xba, 0xfa, 0xb6, 0x45, 0x18, + 0x35, 0x65, 0x7e, 0x59, 0xe6, 0xcd, 0x10, 0xc0, 0xa9, 0x43, 0xa6, 0xd3, 0xd8, 0x32, 0xb1, 0x88, + 0x4f, 0xc4, 0x4c, 0x27, 0x1d, 0x72, 0x4d, 0x42, 0x40, 0xc1, 0xa2, 0x1e, 0xd1, 0xb4, 0x7d, 0xe2, + 0xda, 0xba, 0x25, 0xa6, 0x3b, 0xe9, 0x2d, 0x56, 0x45, 0x3b, 0x48, 0x0c, 0x65, 0x06, 0xcb, 0x1f, + 0x3a, 0x83, 0x7d, 0x1a, 0x9d, 0x49, 0xd1, 0x6e, 0xa5, 0x7b, 0xee, 0xd0, 0xee, 0x7f, 0x36, 0x82, + 0xce, 0xa5, 0xdb, 0x29, 0xbe, 0x88, 0xf2, 0x36, 0x9d, 0xe0, 0xf8, 0x44, 0x38, 0x2e, 0x08, 0xe4, + 0xd9, 0xc4, 0xc6, 0x20, 0xea, 0x80, 0x8d, 0xf4, 0x35, 0x60, 0xa3, 0x47, 0x1a, 0xb0, 0x48, 0x80, + 0x90, 0x3f, 0x42, 0x80, 0x70, 0xc4, 0x59, 0x9f, 0x12, 0xd6, 0xdd, 0x56, 0xb7, 0x4d, 0x95, 0x90, + 0x4d, 0x4e, 0xe5, 0x90, 0x70, 0x2d, 0x00, 0x40, 0x88, 0x33, 0xff, 0x6e, 0x01, 0x3d, 0x59, 0xbb, + 0xd7, 0x75, 0x09, 0xd3, 0x51, 0xef, 0x5a, 0x77, 0x5b, 0x0d, 0x18, 0x2e, 0xa2, 0xfc, 0xce, 0x7e, + 0xd3, 0x8e, 0x0f, 0xd4, 0xca, 0xe6, 0xf2, 0x06, 0x30, 0x08, 0xee, 0xa0, 0x33, 0xde, 0xae, 0xee, + 0x92, 0x66, 0xcd, 0x30, 0x88, 0xe7, 0x5d, 0x27, 0x07, 0x32, 0x74, 0x38, 0xb2, 0x21, 0x3e, 0xf1, + 0xe0, 0xfe, 0xdc, 0x19, 0x2d, 0x49, 0x05, 0xd2, 0x48, 0xe3, 0x26, 0x9a, 0x8a, 0x35, 0xb3, 0x41, + 0x3f, 0x32, 0x37, 0x36, 0x71, 0xc4, 0xb8, 0x41, 0x9c, 0x24, 0x55, 0x80, 0xdd, 0xee, 0x36, 0x7b, + 0x16, 0x1e, 0x94, 0x48, 0x05, 
0xb8, 0xc6, 0x9b, 0x21, 0x80, 0xe3, 0xdf, 0x55, 0xa7, 0xe2, 0x02, + 0x9b, 0x8a, 0x77, 0x06, 0x75, 0xab, 0xbd, 0xde, 0x48, 0x1f, 0x93, 0x72, 0xe8, 0xc4, 0x8a, 0x1f, + 0x15, 0x27, 0xf6, 0x27, 0x45, 0xf4, 0x14, 0x7b, 0x74, 0x66, 0xb3, 0x9a, 0xef, 0xb8, 0x7a, 0x8b, + 0xa8, 0xfa, 0xf8, 0x2a, 0xc2, 0x1e, 0x6f, 0xad, 0x19, 0x86, 0xd3, 0xb5, 0xfd, 0x8d, 0xd0, 0x8c, + 0x67, 0xc5, 0x58, 0x60, 0x2d, 0x81, 0x01, 0x29, 0xbd, 0x70, 0x0b, 0x4d, 0x87, 0xb1, 0x9d, 0xe6, + 0xbb, 0xa6, 0xdd, 0xea, 0x4f, 0x6d, 0xcf, 0x3e, 0xb8, 0x3f, 0x37, 0xbd, 0x14, 0x23, 0x01, 0x09, + 0xa2, 0xd4, 0x26, 0xd9, 0x0c, 0xcc, 0x64, 0x1d, 0x8d, 0xda, 0xe4, 0x66, 0x00, 0x80, 0x10, 0x27, + 0x12, 0x60, 0xe6, 0x1f, 0x1a, 0x60, 0x9e, 0x47, 0xa3, 0x4d, 0x6b, 0x5f, 0xf8, 0x05, 0x19, 0xd4, + 0x2f, 0xaf, 0x6d, 0x02, 0x6d, 0xa7, 0xb1, 0x59, 0xa8, 0x9d, 0x45, 0xa6, 0x9d, 0x66, 0x16, 0xda, + 0xd9, 0xe3, 0x15, 0x1d, 0x4b, 0x41, 0xc7, 0x4e, 0x4e, 0x41, 0xf1, 0xcb, 0x68, 0xa2, 0x49, 0x0c, + 0xa7, 0x49, 0xd6, 0x89, 0xe7, 0xe9, 0x2d, 0x52, 0x2d, 0xb1, 0x81, 0x7b, 0x5c, 0x08, 0x3a, 0xb1, + 0xac, 0x02, 0x21, 0x8a, 0x8b, 0x97, 0xd0, 0xcc, 0x1d, 0xdd, 0xf4, 0xb7, 0xcc, 0x36, 0x59, 0xb5, + 0x35, 0x62, 0x38, 0x76, 0xd3, 0x63, 0x91, 0x6e, 0x81, 0xaf, 0x1f, 0x5e, 0x8f, 0x03, 0x21, 0x89, + 0x3f, 0x98, 0x89, 0xfc, 0xa8, 0x88, 0x66, 0xd9, 0xf8, 0x6b, 0xc4, 0xbd, 0x6d, 0x1a, 0xa4, 0xde, + 0xf5, 0x54, 0x03, 0x49, 0x53, 0xea, 0xdc, 0xd0, 0x95, 0x7a, 0xe4, 0x08, 0x4a, 0xbd, 0x88, 0xca, + 0xbe, 0xd3, 0x31, 0x8d, 0x34, 0x2b, 0xd8, 0x0a, 0x00, 0x10, 0xe2, 0xe0, 0x65, 0x34, 0xed, 0x75, + 0xb7, 0x3d, 0xc3, 0x35, 0x3b, 0x94, 0xaf, 0xe2, 0x8a, 0xab, 0xa2, 0xdf, 0xb4, 0x16, 0x83, 0x43, + 0xa2, 0x47, 0xb0, 0xfc, 0x2a, 0x64, 0xbc, 0xfc, 0xea, 0x6f, 0x0d, 0xf8, 0x6d, 0xd5, 0x06, 0xc7, + 0x98, 0x0d, 0xb6, 0xb2, 0xb0, 0xc1, 0x54, 0x1d, 0x38, 0x96, 0x05, 0x96, 0x4e, 0xd0, 0x02, 0xdf, + 0x40, 0x4f, 0xec, 0x74, 0x2d, 0xeb, 0x60, 0xb3, 0xab, 0x5b, 0xe6, 0x8e, 0x49, 0x9a, 0xf4, 0x45, + 0x79, 0x1d, 0xdd, 0xe0, 0x8b, 0xc6, 0x72, 0x7d, 0x4e, 0x88, 0xfc, 0xc4, 0x4a, 0x3a, 0x1a, 0xf4, + 0xea, 0x3f, 0x98, 0x69, 0xfd, 0x7b, 0x0e, 0x4d, 0xd4, 0x4d, 0x7f, 0xbb, 0x6b, 0xec, 0x11, 0x9f, + 0xae, 0x30, 0xb0, 0x8b, 0x0a, 0xdb, 0x74, 0xe1, 0x21, 0x4c, 0x68, 0x73, 0xc0, 0xe1, 0x91, 0xc4, + 0xc3, 0xd5, 0x4c, 0xf9, 0xc1, 0xfd, 0xb9, 0x02, 0xfb, 0x09, 0x9c, 0x15, 0xbe, 0x89, 0x90, 0x43, + 0x17, 0x36, 0x5b, 0xce, 0x1e, 0xb1, 0xfb, 0x9b, 0x90, 0x26, 0x69, 0xc4, 0x79, 0xa3, 0x16, 0x74, + 0x06, 0x85, 0xd0, 0xfc, 0xf7, 0x73, 0x08, 0x27, 0xf9, 0xe3, 0x1b, 0xa8, 0xd4, 0xf5, 0x68, 0x58, + 0x2e, 0xa6, 0xd1, 0x23, 0xf3, 0x1a, 0xa7, 0x2a, 0x75, 0x53, 0x74, 0x05, 0x49, 0x84, 0x12, 0xec, + 0xe8, 0x9e, 0x77, 0xc7, 0x71, 0x9b, 0xfd, 0x09, 0xcf, 0x08, 0x36, 0x44, 0x57, 0x90, 0x44, 0xe6, + 0x7f, 0x3e, 0x86, 0xce, 0x4a, 0xc1, 0x63, 0xb1, 0x40, 0x93, 0x45, 0xd3, 0xd7, 0x1c, 0x67, 0xef, + 0x86, 0xbd, 0x62, 0xda, 0xa6, 0xb7, 0x2b, 0xd6, 0x04, 0x32, 0x16, 0x58, 0x4e, 0x60, 0x40, 0x4a, + 0x2f, 0xfc, 0x4d, 0xd5, 0x40, 0x47, 0x98, 0x81, 0xea, 0x59, 0xbd, 0xec, 0xe3, 0x9a, 0xe6, 0xd8, + 0x1d, 0xb2, 0xbd, 0xeb, 0x38, 0x7b, 0x22, 0xba, 0x5d, 0x1f, 0x50, 0x9e, 0xd7, 0x39, 0xb5, 0x25, + 0xc7, 0xf6, 0xc9, 0x5d, 0x9f, 0x2f, 0xd3, 0x45, 0x1b, 0x04, 0xac, 0xf0, 0x17, 0xc4, 0x32, 0x3d, + 0xcf, 0x58, 0xae, 0x65, 0x35, 0x04, 0xa9, 0x0b, 0xf7, 0x79, 0x54, 0xe4, 0xbd, 0x58, 0xcc, 0x5c, + 0xe6, 0xae, 0x82, 0xc7, 0xbc, 0x20, 0x20, 0xf8, 0x79, 0x54, 0x70, 0xee, 0xd8, 0x22, 0x84, 0x2d, + 0xd7, 0x9f, 0x10, 0x03, 0x36, 0xb5, 0x4c, 0x3a, 0x2e, 0x31, 0x74, 0x9f, 0x34, 0x6f, 0x50, 0x30, + 0x70, 0x2c, 0xfc, 0xeb, 0x08, 0x51, 0x11, 0x89, 0x41, 
0x35, 0x8b, 0x45, 0x15, 0xe5, 0xfa, 0x53, + 0xa2, 0xcf, 0xd9, 0xb0, 0x4f, 0x43, 0xe2, 0x80, 0x82, 0x8f, 0xaf, 0xa1, 0x49, 0x97, 0x74, 0x1c, + 0xcf, 0xf4, 0x1d, 0xf7, 0x40, 0xb3, 0xba, 0x2d, 0xe6, 0x15, 0xcb, 0xf5, 0x8b, 0x82, 0x42, 0x35, + 0xa4, 0x00, 0x11, 0x3c, 0x88, 0xf5, 0xc3, 0xef, 0xe7, 0xd0, 0xb8, 0x6c, 0x32, 0x09, 0x0d, 0x11, + 0x46, 0x33, 0xc8, 0xf5, 0xc8, 0xf1, 0x0c, 0xd9, 0x87, 0x39, 0x56, 0x50, 0xf8, 0x41, 0x84, 0xbb, + 0xe2, 0xe6, 0xd1, 0x47, 0x65, 0x25, 0x70, 0x0f, 0x9d, 0x49, 0x79, 0x5a, 0xfc, 0x74, 0xa0, 0x0f, + 0x3c, 0xe4, 0x9f, 0x10, 0x0f, 0x5f, 0x88, 0x68, 0xc1, 0x2b, 0x89, 0xf7, 0xc8, 0xe3, 0x93, 0x73, + 0x02, 0x7b, 0xf2, 0xf0, 0xb7, 0x37, 0xff, 0x17, 0x15, 0x34, 0x2b, 0x99, 0xd3, 0x29, 0x96, 0xb8, + 0xaa, 0xdf, 0x51, 0x2c, 0x33, 0x77, 0x72, 0x96, 0x19, 0x55, 0xed, 0x91, 0x81, 0x55, 0x7b, 0xf4, + 0x98, 0xaa, 0xfd, 0x0c, 0x2a, 0x09, 0xba, 0x5e, 0x35, 0xcf, 0xec, 0x96, 0x3b, 0x6e, 0xd1, 0x06, + 0x12, 0x8a, 0x7f, 0x3b, 0x6e, 0x04, 0x7c, 0x69, 0x7c, 0x2b, 0x2b, 0x23, 0xe0, 0x6f, 0xa6, 0x4f, + 0x53, 0x08, 0x9d, 0x4e, 0xb1, 0xa7, 0xd3, 0xd9, 0x43, 0xe7, 0xbd, 0x3d, 0xb3, 0x53, 0x77, 0x75, + 0xdb, 0xd8, 0x05, 0xb2, 0xe3, 0x2d, 0xb1, 0x8c, 0x5a, 0xf3, 0x86, 0x7d, 0xa3, 0x43, 0xec, 0x06, + 0x30, 0xc7, 0x52, 0xaa, 0x7f, 0x5c, 0xb0, 0x3b, 0xaf, 0x1d, 0x86, 0x0c, 0x87, 0xd3, 0xc2, 0xb7, + 0x50, 0x45, 0x67, 0x49, 0x07, 0x3e, 0xdf, 0x97, 0xfa, 0x99, 0x32, 0xa7, 0x1e, 0xdc, 0x9f, 0xab, + 0xd4, 0xc2, 0xde, 0xa0, 0x92, 0xc2, 0x6f, 0xa3, 0x09, 0xa1, 0x3c, 0x22, 0x39, 0x5a, 0xee, 0x87, + 0xf6, 0x0c, 0x5d, 0x0b, 0xbd, 0xae, 0xf6, 0x87, 0x28, 0x39, 0xfc, 0x1a, 0x3a, 0xb7, 0x1d, 0xbc, + 0x0b, 0x8f, 0xbd, 0x8b, 0xba, 0xee, 0x91, 0x9b, 0xb0, 0xc6, 0xbc, 0x4c, 0xb9, 0x7e, 0x41, 0x8c, + 0xcf, 0xb9, 0xd8, 0x1b, 0x13, 0x58, 0xd0, 0xa3, 0x77, 0x8f, 0x79, 0xbd, 0x72, 0xac, 0x79, 0x3d, + 0x12, 0x78, 0x8f, 0x67, 0x12, 0x78, 0xf7, 0xf6, 0x0c, 0xc7, 0x0a, 0xbc, 0x27, 0x4e, 0x30, 0xf0, + 0x16, 0x6b, 0xa1, 0xc9, 0x8c, 0xd7, 0x42, 0x2f, 0xa3, 0x09, 0x63, 0x97, 0x18, 0x7b, 0x2c, 0xd5, + 0x7b, 0x5b, 0xb7, 0x58, 0xd2, 0xbc, 0x1c, 0xae, 0xa8, 0x97, 0x54, 0x20, 0x44, 0x71, 0x07, 0x9b, + 0x25, 0xbe, 0x99, 0x43, 0x4f, 0xf6, 0xf4, 0x07, 0xf8, 0x72, 0xc4, 0x65, 0xe6, 0xa2, 0x5b, 0x8b, + 0x3d, 0x1c, 0xe5, 0xa0, 0x73, 0xc7, 0x9f, 0x17, 0xd0, 0x99, 0x25, 0xdd, 0x22, 0x76, 0x53, 0x8f, + 0x4c, 0x1a, 0xcf, 0xa1, 0x92, 0x67, 0xec, 0x92, 0x66, 0xd7, 0x0a, 0xd2, 0x55, 0x52, 0x3d, 0x34, + 0xd1, 0x0e, 0x12, 0x43, 0xe6, 0xd3, 0xe9, 0x60, 0x8e, 0x44, 0xb1, 0xe5, 0x38, 0x4a, 0x0c, 0xfc, + 0x12, 0x9a, 0x14, 0x89, 0x62, 0xc7, 0x5e, 0xd6, 0x7d, 0xe2, 0x55, 0x47, 0x99, 0x6f, 0xc3, 0x54, + 0xde, 0x2b, 0x11, 0x08, 0xc4, 0x30, 0x29, 0x27, 0xdf, 0x6c, 0x93, 0x7b, 0x8e, 0x1d, 0x2c, 0xae, + 0x25, 0xa7, 0x2d, 0xd1, 0x0e, 0x12, 0x03, 0x7f, 0x23, 0x99, 0xe9, 0xfc, 0xfc, 0x80, 0x9a, 0x9b, + 0x32, 0x58, 0x7d, 0xd8, 0xd1, 0x57, 0x72, 0xa8, 0xd2, 0x21, 0xae, 0x67, 0x7a, 0x3e, 0xb1, 0x0d, + 0x22, 0x32, 0x9d, 0x37, 0xb2, 0xb0, 0xa6, 0x46, 0x48, 0x96, 0x3b, 0x5a, 0xa5, 0x01, 0x54, 0xa6, + 0xa7, 0xb3, 0x8a, 0x1e, 0xcc, 0x70, 0xee, 0xa2, 0xb3, 0x4b, 0xba, 0x6f, 0xec, 0x76, 0x3b, 0xdc, + 0xa2, 0xbb, 0xae, 0xee, 0x9b, 0x8e, 0x8d, 0x9f, 0x45, 0x63, 0xc4, 0xd6, 0xb7, 0x2d, 0xd2, 0x8c, + 0xef, 0x13, 0x5d, 0xe1, 0xcd, 0x10, 0xc0, 0xf1, 0x27, 0x50, 0xa5, 0xad, 0xdf, 0x5d, 0x16, 0x3d, + 0x85, 0x9a, 0xca, 0x53, 0x14, 0xeb, 0x21, 0x08, 0x54, 0xbc, 0xf9, 0x2f, 0xa2, 0xb3, 0x9c, 0xe5, + 0xba, 0xde, 0x51, 0x46, 0xf4, 0x08, 0x5b, 0x32, 0xcb, 0x68, 0xda, 0x70, 0x89, 0xee, 0x93, 0xd5, + 0x9d, 0x0d, 0xc7, 0xbf, 0x72, 0xd7, 0xf4, 0x7c, 0xb1, 0x37, 0x23, 0xf3, 0x41, 
0x4b, 0x31, 0x38, + 0x24, 0x7a, 0xcc, 0x7f, 0x6b, 0x0c, 0xe1, 0x2b, 0x6d, 0xd3, 0xf7, 0xa3, 0x41, 0xdd, 0x25, 0x54, + 0xdc, 0x76, 0x9d, 0x3d, 0x19, 0x59, 0xca, 0xfd, 0x95, 0x3a, 0x6b, 0x05, 0x01, 0xa5, 0x3e, 0xc5, + 0xd8, 0xd5, 0x6d, 0x9b, 0x58, 0x61, 0x18, 0x26, 0x7d, 0xca, 0x92, 0x84, 0x80, 0x82, 0xc5, 0xce, + 0x9b, 0xf0, 0x5f, 0x4a, 0xee, 0x2b, 0x3c, 0x6f, 0x12, 0x82, 0x40, 0xc5, 0x8b, 0x2c, 0xcd, 0xf3, + 0x59, 0x2f, 0xcd, 0x0b, 0x19, 0x2c, 0xcd, 0xd3, 0xcf, 0x61, 0x14, 0x4f, 0xe5, 0x1c, 0xc6, 0xd8, + 0x51, 0xcf, 0x61, 0x94, 0x32, 0x9e, 0xfc, 0x3e, 0x50, 0x5d, 0x22, 0x5f, 0xe6, 0xbd, 0x33, 0xa8, + 0xfd, 0x27, 0xd4, 0xf3, 0x58, 0x91, 0xc5, 0x47, 0x66, 0xad, 0xf7, 0xe1, 0x08, 0x9a, 0x8e, 0xbb, + 0x5c, 0x7c, 0x0f, 0x8d, 0x19, 0xdc, 0x43, 0x89, 0x55, 0x96, 0x36, 0xf0, 0x44, 0x93, 0xf4, 0x77, + 0xe2, 0xb0, 0x02, 0x87, 0x40, 0xc0, 0x10, 0x7f, 0x39, 0x87, 0xca, 0x46, 0xe0, 0xa4, 0x44, 0x16, + 0x6b, 0x60, 0xf6, 0x29, 0x4e, 0x8f, 0x9f, 0x40, 0x90, 0x10, 0x08, 0x99, 0xce, 0xff, 0x78, 0x04, + 0x55, 0x54, 0xff, 0xf4, 0x79, 0x45, 0xcb, 0xf8, 0x78, 0xfc, 0xb2, 0x62, 0xbb, 0xf2, 0x50, 0x5c, + 0x28, 0x04, 0xc5, 0xa6, 0xd6, 0x7c, 0x63, 0x9b, 0x86, 0x36, 0xf4, 0xe5, 0x84, 0x7e, 0x2a, 0x6c, + 0x53, 0x14, 0xa7, 0x83, 0xf2, 0x5e, 0x87, 0x18, 0xe2, 0x71, 0x37, 0xb2, 0x53, 0x1b, 0xad, 0x43, + 0x8c, 0xd0, 0xa1, 0xd3, 0x5f, 0xc0, 0x38, 0xe1, 0xbb, 0xa8, 0xe8, 0xf9, 0xba, 0xdf, 0xf5, 0x44, + 0x86, 0x2b, 0x43, 0x55, 0xd5, 0x18, 0xdd, 0xd0, 0x8b, 0xf3, 0xdf, 0x20, 0xf8, 0xcd, 0x5f, 0x45, + 0x33, 0x09, 0xbd, 0xa6, 0xae, 0x9d, 0xdc, 0xed, 0xb8, 0xc4, 0xa3, 0xd1, 0x51, 0x3c, 0x5c, 0xbc, + 0x22, 0x21, 0xa0, 0x60, 0xcd, 0xff, 0x24, 0x87, 0xa6, 0x14, 0x4a, 0x6b, 0xa6, 0xe7, 0xe3, 0xcf, + 0x25, 0x5e, 0xd5, 0xc2, 0xd1, 0x5e, 0x15, 0xed, 0xcd, 0x5e, 0x94, 0xb4, 0xef, 0xa0, 0x45, 0x79, + 0x4d, 0x0e, 0x2a, 0x98, 0x3e, 0x69, 0x7b, 0x22, 0x4b, 0xf9, 0x6a, 0x76, 0x63, 0x16, 0x66, 0x53, + 0x56, 0x29, 0x03, 0xe0, 0x7c, 0xe6, 0xff, 0x61, 0x25, 0xf2, 0x88, 0xf4, 0xfd, 0xb1, 0xe3, 0x7e, + 0xb4, 0xa9, 0xde, 0xf5, 0x94, 0x0d, 0xd8, 0xf0, 0xb8, 0x9f, 0x02, 0x83, 0x08, 0x26, 0xde, 0x47, + 0x25, 0x9f, 0xb4, 0x3b, 0x96, 0xee, 0x07, 0x67, 0x04, 0xae, 0x0e, 0xf8, 0x04, 0x5b, 0x82, 0x1c, + 0x9f, 0xa5, 0x82, 0x5f, 0x20, 0xd9, 0xe0, 0x36, 0x1a, 0xf3, 0xf8, 0x3e, 0x89, 0xd0, 0xb3, 0x95, + 0x01, 0x39, 0x06, 0xbb, 0x2e, 0xcc, 0x79, 0x88, 0x1f, 0x10, 0xf0, 0xc0, 0x5f, 0x44, 0x85, 0xb6, + 0x69, 0x9b, 0x0e, 0xcb, 0x8e, 0x54, 0x2e, 0xbf, 0x91, 0xad, 0x21, 0x2d, 0xac, 0x53, 0xda, 0x7c, + 0x1a, 0x90, 0xef, 0x8b, 0xb5, 0x01, 0x67, 0xcb, 0x0e, 0x06, 0x1a, 0x22, 0xa8, 0x16, 0x31, 0xfa, + 0xe7, 0x32, 0x96, 0x41, 0xc6, 0xec, 0xd1, 0xd9, 0x28, 0x68, 0x06, 0xc9, 0x1f, 0xdf, 0x43, 0xf9, + 0x1d, 0xd3, 0x22, 0x62, 0xdf, 0xf9, 0x56, 0xc6, 0x72, 0xac, 0x98, 0x16, 0xe1, 0x32, 0x84, 0x27, + 0x53, 0x4c, 0x8b, 0x00, 0xe3, 0xc9, 0x06, 0xc2, 0x25, 0x9c, 0x86, 0xd8, 0x74, 0xcb, 0x7a, 0x20, + 0x40, 0x90, 0x8f, 0x0d, 0x44, 0xd0, 0x0c, 0x92, 0x3f, 0xfe, 0x5a, 0x2e, 0xcc, 0x1a, 0xf2, 0xd3, + 0x9a, 0x6f, 0x66, 0x2c, 0x8b, 0xc8, 0xd5, 0x70, 0x51, 0x64, 0xd8, 0x9e, 0xc8, 0x23, 0xde, 0x43, + 0x79, 0xbd, 0xbd, 0xdf, 0x11, 0xa1, 0x4a, 0xd6, 0x6f, 0xa4, 0xd6, 0xde, 0xef, 0xc4, 0xde, 0x48, + 0x6d, 0x7d, 0xb3, 0x01, 0x8c, 0x27, 0x35, 0x8d, 0x3d, 0x7d, 0x67, 0x4f, 0xaf, 0xa2, 0xa1, 0x98, + 0xc6, 0x75, 0x4a, 0x3b, 0x66, 0x1a, 0xac, 0x0d, 0x38, 0x5b, 0xfa, 0xec, 0xed, 0x7d, 0xdf, 0xaf, + 0x56, 0x86, 0xf2, 0xec, 0xeb, 0xfb, 0xbe, 0x1f, 0x7b, 0xf6, 0xf5, 0xcd, 0xad, 0x2d, 0x60, 0x3c, + 0x29, 0x6f, 0x5b, 0xf7, 0x3d, 0x91, 0x84, 0xca, 0x9a, 0xf7, 0x86, 0xee, 0x7b, 0x31, 0xde, 0x1b, + 0xb5, 
0x2d, 0x0d, 0x18, 0x4f, 0x7c, 0x1b, 0x8d, 0x7a, 0xb6, 0x57, 0x9d, 0x60, 0xac, 0x5f, 0xcf, + 0x98, 0xb5, 0x66, 0x0b, 0xce, 0xf2, 0xe8, 0x89, 0xb6, 0xa1, 0x01, 0x65, 0xc8, 0xf8, 0xee, 0x7b, + 0xd5, 0xc9, 0xe1, 0xf0, 0xdd, 0x4f, 0xf0, 0xdd, 0xa4, 0x7c, 0xf7, 0x3d, 0xfc, 0x95, 0x1c, 0x2a, + 0x76, 0xba, 0xdb, 0x5a, 0x77, 0xbb, 0x3a, 0xc5, 0x78, 0x7f, 0x36, 0x63, 0xde, 0x0d, 0x46, 0x9c, + 0xb3, 0x97, 0x31, 0x06, 0x6f, 0x04, 0xc1, 0x99, 0x09, 0xc1, 0xb9, 0x56, 0xa7, 0x87, 0x22, 0xc4, + 0x55, 0x46, 0x2d, 0x26, 0x04, 0x6f, 0x04, 0xc1, 0x39, 0x10, 0xc2, 0xd2, 0xb7, 0xab, 0x33, 0xc3, + 0x12, 0xc2, 0xd2, 0x53, 0x84, 0xb0, 0x74, 0x2e, 0x84, 0xa5, 0x6f, 0x53, 0xd5, 0xdf, 0x6d, 0xee, + 0x78, 0x55, 0x3c, 0x14, 0xd5, 0xbf, 0xd6, 0xdc, 0x89, 0xab, 0xfe, 0xb5, 0xe5, 0x15, 0x0d, 0x18, + 0x4f, 0xea, 0x72, 0x3c, 0x4b, 0x37, 0xf6, 0xaa, 0x67, 0x86, 0xe2, 0x72, 0x34, 0x4a, 0x3b, 0xe6, + 0x72, 0x58, 0x1b, 0x70, 0xb6, 0xf8, 0xf7, 0x72, 0xa8, 0x22, 0xce, 0x9e, 0x5d, 0x75, 0xcd, 0x66, + 0xf5, 0x6c, 0x36, 0x2b, 0xc4, 0xb8, 0x18, 0x21, 0x07, 0x2e, 0x8c, 0xcc, 0x2e, 0x28, 0x10, 0x50, + 0x05, 0xc1, 0x7f, 0x9a, 0x43, 0x93, 0x7a, 0xe4, 0x94, 0x61, 0xf5, 0x71, 0x26, 0xdb, 0x76, 0xd6, + 0x53, 0x42, 0xf4, 0x28, 0x23, 0x13, 0x4f, 0x66, 0x53, 0xa3, 0x40, 0x88, 0x49, 0xc4, 0xd4, 0xd7, + 0xf3, 0x5d, 0xb3, 0x43, 0xaa, 0xe7, 0x86, 0xa2, 0xbe, 0x1a, 0x23, 0x1e, 0x53, 0x5f, 0xde, 0x08, + 0x82, 0x33, 0x9b, 0xba, 0x09, 0x5f, 0x92, 0x57, 0x9f, 0x18, 0xca, 0xd4, 0x1d, 0x2c, 0xf8, 0xa3, + 0x53, 0xb7, 0x68, 0x85, 0x80, 0x39, 0xd5, 0x65, 0x97, 0x34, 0x4d, 0xaf, 0x5a, 0x1d, 0x8a, 0x2e, + 0x03, 0xa5, 0x1d, 0xd3, 0x65, 0xd6, 0x06, 0x9c, 0x2d, 0x75, 0xe7, 0xb6, 0xb7, 0x5f, 0x7d, 0x72, + 0x28, 0xee, 0x7c, 0xc3, 0xdb, 0x8f, 0xb9, 0xf3, 0x0d, 0x6d, 0x13, 0x28, 0x43, 0xe1, 0xce, 0x2d, + 0x4f, 0x77, 0xab, 0xb3, 0x43, 0x72, 0xe7, 0x94, 0x78, 0xc2, 0x9d, 0xd3, 0x46, 0x10, 0x9c, 0x99, + 0x16, 0xb0, 0xeb, 0x65, 0xa6, 0x51, 0xfd, 0xd8, 0x50, 0xb4, 0xe0, 0x2a, 0xa7, 0x1e, 0xd3, 0x02, + 0xd1, 0x0a, 0x01, 0x73, 0xfc, 0x0c, 0x8d, 0x6a, 0x3b, 0x96, 0x69, 0xe8, 0x5e, 0xf5, 0x29, 0x76, + 0xf2, 0x70, 0x9c, 0xc7, 0x9c, 0xbc, 0x0d, 0x24, 0x14, 0x7f, 0x2f, 0x87, 0xa6, 0x62, 0x7b, 0x6c, + 0xd5, 0xf3, 0x4c, 0x74, 0x23, 0x63, 0xd1, 0xeb, 0x51, 0x2e, 0xfc, 0x11, 0xe4, 0x61, 0x8d, 0xf8, + 0x0e, 0x4d, 0x5c, 0x28, 0xfc, 0x8d, 0x1c, 0x2a, 0xcb, 0xb6, 0xea, 0x05, 0x26, 0xe2, 0x5b, 0xc3, + 0x12, 0x91, 0x0b, 0x27, 0x8f, 0x1e, 0x86, 0xa7, 0x0c, 0x42, 0x11, 0x98, 0xd7, 0x66, 0x3a, 0xaf, + 0xf9, 0x2e, 0xd1, 0xdb, 0xd5, 0xb9, 0xa1, 0x78, 0x6d, 0x08, 0x39, 0xc4, 0xbc, 0xb6, 0x02, 0x01, + 0x55, 0x10, 0xf6, 0x4a, 0xf5, 0xe8, 0xc9, 0xbf, 0xea, 0xc5, 0xa1, 0xbc, 0xd2, 0xf8, 0xf9, 0xc2, + 0xe8, 0x2b, 0x8d, 0x41, 0x21, 0x2e, 0x14, 0xfe, 0x9b, 0x1c, 0x9a, 0xd1, 0xe3, 0xc7, 0x84, 0xab, + 0xff, 0x8f, 0x89, 0x4a, 0x86, 0x21, 0x6a, 0xe4, 0x38, 0x32, 0x13, 0xf6, 0x49, 0x21, 0xec, 0x4c, + 0x02, 0x0e, 0x49, 0xd1, 0x68, 0x90, 0xe2, 0xed, 0xf8, 0x9d, 0xea, 0xfc, 0x50, 0x82, 0x14, 0x6d, + 0xc7, 0x8f, 0xaf, 0x8b, 0xb4, 0x95, 0xad, 0x06, 0x30, 0x9e, 0x3c, 0x4a, 0x23, 0xae, 0x6b, 0xfa, + 0xd5, 0xa7, 0x87, 0x13, 0xa5, 0x31, 0xe2, 0xf1, 0x28, 0x8d, 0x35, 0x82, 0xe0, 0x3c, 0xdb, 0x45, + 0x28, 0xcc, 0x2d, 0xa4, 0xe4, 0x6f, 0x37, 0xd5, 0xfc, 0x6d, 0xe5, 0xf2, 0xcb, 0x7d, 0x67, 0xd0, + 0xb5, 0x5f, 0xa9, 0xb9, 0xbe, 0xb9, 0xa3, 0x1b, 0xbe, 0x92, 0xfc, 0x9d, 0xfd, 0x66, 0x0e, 0x4d, + 0x44, 0xf2, 0x09, 0x29, 0xac, 0x77, 0xa3, 0xac, 0x21, 0xfb, 0x2d, 0x47, 0x55, 0xa2, 0xdf, 0xcc, + 0xa1, 0xb2, 0xcc, 0x2c, 0xa4, 0x48, 0xd3, 0x8c, 0x4a, 0x33, 0x68, 0xa6, 0x94, 0xb1, 0x4a, 0x97, + 0x84, 0x8e, 0x4d, 0x24, 0xc5, 
0x30, 0xfc, 0xb1, 0x91, 0xec, 0xd2, 0x25, 0xfa, 0x20, 0x87, 0xc6, + 0xd5, 0x44, 0x43, 0x8a, 0x40, 0xad, 0xa8, 0x40, 0x9b, 0xd9, 0x1c, 0x8e, 0x3a, 0xe4, 0x5d, 0xc9, + 0x9c, 0xc3, 0xf0, 0xdf, 0x55, 0xec, 0x86, 0xac, 0x2a, 0xc9, 0xd7, 0x73, 0x08, 0x85, 0x09, 0x88, + 0x14, 0x51, 0x48, 0x54, 0x94, 0x41, 0xf7, 0xa8, 0x39, 0xaf, 0xde, 0xa3, 0x22, 0xb3, 0x11, 0xc3, + 0x1f, 0x95, 0xf5, 0xcd, 0xad, 0xad, 0x1e, 0x92, 0xfc, 0x56, 0x0e, 0x95, 0x65, 0x6e, 0x62, 0xf8, + 0x83, 0xb2, 0x51, 0xdb, 0xd2, 0xf8, 0xea, 0x21, 0x29, 0xca, 0x57, 0x73, 0xa8, 0x14, 0xe4, 0x2a, + 0x52, 0x24, 0x31, 0xa2, 0x92, 0x0c, 0x7a, 0xa6, 0x4f, 0xdb, 0xd0, 0x7a, 0x0c, 0x09, 0x93, 0x63, + 0xff, 0xc4, 0xe4, 0xd8, 0xec, 0x25, 0xc7, 0x7b, 0x39, 0x54, 0x51, 0xf2, 0x18, 0x29, 0xa2, 0xec, + 0x44, 0x45, 0x19, 0x74, 0x7b, 0x46, 0x30, 0xeb, 0x2d, 0x8d, 0x92, 0xd0, 0x18, 0xbe, 0x34, 0x82, + 0xd9, 0xa1, 0xd2, 0x04, 0x99, 0x8d, 0x13, 0x91, 0x86, 0x32, 0xeb, 0x6d, 0xce, 0x32, 0xcb, 0x31, + 0x7c, 0x73, 0xbe, 0xb6, 0xbc, 0xa2, 0x1d, 0xe2, 0xe4, 0xc2, 0x94, 0xc7, 0xf0, 0xed, 0x99, 0xf3, + 0x4a, 0x97, 0xe5, 0xdb, 0x39, 0x34, 0x1d, 0xcf, 0x7b, 0xa4, 0x48, 0xb4, 0x17, 0x95, 0x68, 0xd0, + 0x8b, 0xff, 0x2a, 0xc7, 0x74, 0xb9, 0xfe, 0x28, 0x87, 0xce, 0xa4, 0xe4, 0x3c, 0x52, 0x44, 0xb3, + 0xa3, 0xa2, 0xdd, 0x1a, 0xd6, 0x9d, 0xd1, 0xb8, 0x66, 0x2b, 0x49, 0x8f, 0xe1, 0x6b, 0xb6, 0x60, + 0xd6, 0x3b, 0x9c, 0x50, 0x93, 0x1f, 0xc3, 0x0f, 0x27, 0x92, 0x67, 0x2b, 0xe2, 0xfa, 0x1d, 0xa6, + 0x41, 0x86, 0xaf, 0xdf, 0x9c, 0x57, 0xef, 0x79, 0x22, 0x48, 0x8a, 0x0c, 0x7f, 0x9e, 0xd8, 0xd0, + 0x36, 0x0f, 0x9d, 0x27, 0x64, 0x82, 0xe4, 0x24, 0xe6, 0x09, 0xc6, 0xac, 0xb7, 0xc6, 0xa8, 0x89, + 0x92, 0xe1, 0x6b, 0x4c, 0xc0, 0x2d, 0x5d, 0x9e, 0xef, 0xe4, 0x94, 0xdb, 0x49, 0x4a, 0xf6, 0x23, + 0x45, 0x2e, 0x27, 0x2a, 0xd7, 0x1b, 0x43, 0x3b, 0x87, 0xac, 0xca, 0xf7, 0x61, 0x0e, 0x4d, 0x46, + 0x53, 0x1f, 0x29, 0x92, 0x99, 0x51, 0xc9, 0xb4, 0x21, 0xdc, 0x7c, 0x8a, 0x7b, 0xee, 0x78, 0xee, + 0x63, 0xf8, 0x9e, 0x5b, 0xe5, 0xd8, 0xfb, 0x5d, 0xa6, 0xa5, 0x3d, 0x86, 0xff, 0x2e, 0x7b, 0x5f, + 0xe6, 0x54, 0xe5, 0xfb, 0x6e, 0x0e, 0x9d, 0x4b, 0xcf, 0x75, 0xa4, 0x48, 0xb8, 0x1f, 0x95, 0xf0, + 0xcd, 0x21, 0x5e, 0xf9, 0x8e, 0xc7, 0x2a, 0x32, 0xd9, 0x31, 0xfc, 0x58, 0x45, 0x5b, 0xd9, 0x6a, + 0x1c, 0x16, 0xc3, 0x85, 0x79, 0x8f, 0x13, 0x88, 0xe1, 0x38, 0xb3, 0x54, 0x69, 0xe6, 0xfd, 0xc8, + 0x89, 0x23, 0x7e, 0x1c, 0x09, 0xbf, 0x23, 0x0f, 0x40, 0xf1, 0x73, 0x42, 0x9f, 0xec, 0x3f, 0xa7, + 0x72, 0xf8, 0x39, 0xa7, 0xbf, 0xcf, 0xa3, 0xa9, 0x58, 0x7e, 0x81, 0x95, 0x1e, 0xa1, 0x3f, 0x59, + 0x9d, 0xae, 0x5c, 0xf4, 0x1e, 0xf6, 0x95, 0x00, 0x00, 0x21, 0x0e, 0xfe, 0x30, 0x87, 0xa6, 0xee, + 0xe8, 0xbe, 0xb1, 0xdb, 0xd0, 0xfd, 0x5d, 0x7e, 0x58, 0x2d, 0xa3, 0xb7, 0xf7, 0x7a, 0x94, 0x6a, + 0x98, 0x5e, 0x8c, 0x01, 0x20, 0xce, 0x1f, 0x3f, 0x8b, 0xc6, 0x3a, 0x8e, 0x65, 0x99, 0x76, 0x4b, + 0x14, 0x5c, 0x91, 0xf9, 0xf2, 0x06, 0x6f, 0x86, 0x00, 0x1e, 0x2d, 0x94, 0x95, 0xcf, 0xe4, 0x18, + 0x48, 0x6c, 0x48, 0x8f, 0x75, 0x3a, 0xb3, 0xf0, 0x51, 0x39, 0x9d, 0xf9, 0x2f, 0x79, 0x84, 0x93, + 0x73, 0xe0, 0xc3, 0x4a, 0xc9, 0x5d, 0x42, 0x45, 0x23, 0x54, 0x15, 0xe5, 0x3c, 0xb5, 0x78, 0xa3, + 0x02, 0xca, 0x6f, 0x3a, 0x78, 0xc4, 0xe8, 0xba, 0x24, 0x59, 0x39, 0x88, 0xb7, 0x83, 0xc4, 0xe8, + 0xb3, 0x30, 0xc6, 0x07, 0xc9, 0xdb, 0x0a, 0xef, 0x64, 0x1e, 0x0c, 0xf4, 0xf1, 0xf2, 0x6f, 0xb2, + 0x42, 0x41, 0xbb, 0xe2, 0x36, 0x56, 0xb1, 0xef, 0x9b, 0xdd, 0x35, 0xd9, 0x19, 0x14, 0x42, 0xa7, + 0x53, 0x46, 0x63, 0x30, 0x9d, 0xfa, 0x71, 0x11, 0xcd, 0x24, 0xdc, 0xe5, 0x29, 0x5d, 0xac, 0x7c, + 0x0e, 0x95, 0xe8, 0x5f, 0xa5, 0x8e, 0x85, 0x7c, 0x87, 
0xd7, 0x44, 0x3b, 0x48, 0x0c, 0xe5, 0xfe, + 0xe0, 0x68, 0xcf, 0xfb, 0x83, 0xb7, 0x22, 0x97, 0xa8, 0xb3, 0xac, 0x75, 0xf6, 0x32, 0x9a, 0xe0, + 0xd9, 0xfa, 0xe0, 0xa6, 0x5d, 0x21, 0x7a, 0xd3, 0xea, 0xaa, 0x0a, 0x84, 0x28, 0x6e, 0x8f, 0x7b, + 0x75, 0xc5, 0x63, 0xdd, 0xab, 0x7b, 0x3f, 0x59, 0xd0, 0xe2, 0xed, 0xac, 0xa7, 0xcf, 0x3e, 0x2c, + 0x4b, 0xbd, 0x94, 0x5a, 0x3a, 0xf4, 0x52, 0xea, 0x22, 0x2a, 0x7b, 0x9e, 0xf5, 0x1a, 0x71, 0xcd, + 0x9d, 0x03, 0x76, 0x21, 0x52, 0x29, 0xbc, 0xa5, 0x05, 0x00, 0x08, 0x71, 0x3e, 0x8a, 0xe7, 0xe9, + 0xff, 0x39, 0x87, 0x26, 0x79, 0x7a, 0xab, 0xd6, 0xe9, 0x2c, 0xb9, 0xa4, 0xe9, 0x51, 0xd7, 0xd3, + 0x71, 0xcd, 0xdb, 0xba, 0x4f, 0x82, 0xab, 0x70, 0xfd, 0xb9, 0x9e, 0x86, 0xec, 0x0c, 0x0a, 0x21, + 0xfc, 0x34, 0x2a, 0xe8, 0x9d, 0xce, 0xea, 0x32, 0x93, 0x61, 0x34, 0x3c, 0x36, 0x50, 0xa3, 0x8d, + 0xc0, 0x61, 0xf8, 0x15, 0x34, 0x69, 0xda, 0x9e, 0xaf, 0x5b, 0x16, 0x3b, 0x73, 0xbf, 0xba, 0xcc, + 0x1c, 0xfd, 0x68, 0x78, 0x08, 0x64, 0x35, 0x02, 0x85, 0x18, 0xf6, 0xfc, 0xcf, 0xc6, 0xd1, 0x4c, + 0x22, 0x5b, 0x87, 0x67, 0xd1, 0x88, 0xc9, 0x2f, 0x29, 0x8d, 0xd6, 0x91, 0xa0, 0x34, 0xb2, 0xba, + 0x0c, 0x23, 0x66, 0x53, 0x75, 0x24, 0x23, 0x27, 0xe7, 0x48, 0x64, 0xad, 0x82, 0xd1, 0xa3, 0xd6, + 0x2a, 0x08, 0xef, 0x0e, 0x8a, 0xbb, 0x77, 0x29, 0x17, 0xba, 0xc3, 0xfb, 0x86, 0xa0, 0xe0, 0x1f, + 0xa9, 0x78, 0xc2, 0x0d, 0x54, 0xd2, 0x3b, 0x26, 0xbf, 0x57, 0x5c, 0xec, 0xfb, 0xbe, 0x4f, 0xad, + 0xb1, 0xca, 0x2f, 0x15, 0x4b, 0x22, 0xc9, 0x1b, 0xc5, 0x63, 0xd9, 0xde, 0x28, 0x56, 0x83, 0x81, + 0xd2, 0x43, 0x83, 0x81, 0x4b, 0xa8, 0xa8, 0x1b, 0xbe, 0x79, 0x9b, 0x08, 0x3b, 0x96, 0x21, 0x46, + 0x8d, 0xb5, 0x82, 0x80, 0x8a, 0x72, 0xbf, 0x7e, 0x10, 0xf2, 0xa2, 0x44, 0xb9, 0xdf, 0x00, 0x04, + 0x2a, 0x1e, 0xf3, 0xb5, 0x4c, 0x69, 0x02, 0x5f, 0x5b, 0x89, 0xf9, 0x5a, 0x15, 0x08, 0x51, 0x5c, + 0x5c, 0x43, 0x53, 0xbc, 0xe1, 0x66, 0xc7, 0x72, 0xf4, 0x26, 0xed, 0x3e, 0x1e, 0xd5, 0x8a, 0xab, + 0x51, 0x30, 0xc4, 0xf1, 0x7b, 0xb8, 0xeb, 0x89, 0xc1, 0xdd, 0xf5, 0x64, 0x36, 0xee, 0x3a, 0x6e, + 0x91, 0x7d, 0xb8, 0xeb, 0x77, 0xe3, 0x95, 0x01, 0xf8, 0x29, 0xcd, 0x41, 0x5d, 0x2b, 0x35, 0xaf, + 0xa6, 0x7a, 0xf7, 0xff, 0x48, 0x15, 0x01, 0x3e, 0x89, 0x26, 0x1c, 0xb7, 0xa5, 0xdb, 0xe6, 0x3d, + 0xe6, 0x70, 0x3c, 0x76, 0x5a, 0xb3, 0xcc, 0xb5, 0xf5, 0x86, 0x0a, 0x80, 0x28, 0x1e, 0xbe, 0x87, + 0xca, 0xad, 0xc0, 0xcb, 0x56, 0x67, 0x32, 0xf1, 0x33, 0x51, 0xaf, 0xcd, 0xaf, 0x07, 0xc9, 0x36, + 0x08, 0xd9, 0x29, 0xb3, 0x12, 0x3e, 0xc1, 0xfb, 0xe3, 0x7f, 0x9c, 0x43, 0x33, 0x1d, 0xfd, 0x80, + 0x6a, 0xe8, 0x15, 0xdb, 0x35, 0x8d, 0xdd, 0x36, 0xb1, 0xfd, 0xea, 0x99, 0x4c, 0xd2, 0x33, 0x8d, + 0x38, 0xdd, 0x15, 0x4b, 0x6f, 0x79, 0xe1, 0x21, 0x8e, 0x04, 0x1c, 0x92, 0xa2, 0x0c, 0x36, 0x6d, + 0xbe, 0x5b, 0x62, 0xf3, 0x4c, 0x74, 0x1f, 0xe6, 0x94, 0x82, 0xd2, 0x4f, 0xa1, 0xb2, 0x08, 0x59, + 0xc4, 0xe4, 0x5a, 0xae, 0x7f, 0x4c, 0x8c, 0xc4, 0x99, 0x44, 0xb1, 0x8f, 0xd5, 0x65, 0x08, 0xb1, + 0x8f, 0x18, 0xa1, 0x46, 0x8a, 0x4e, 0xe4, 0xb3, 0x2b, 0x3a, 0xa1, 0xa1, 0xc7, 0xf9, 0x05, 0x61, + 0x4d, 0x5b, 0x63, 0x11, 0x94, 0x69, 0xf0, 0xfb, 0xc1, 0xbc, 0x3c, 0xe1, 0x79, 0xf1, 0x10, 0x8f, + 0x5f, 0x49, 0x43, 0x82, 0xf4, 0xbe, 0xc2, 0x15, 0x5b, 0xba, 0x74, 0xc5, 0xc5, 0x84, 0x2b, 0x0e, + 0x81, 0x10, 0xc5, 0xed, 0xe1, 0x47, 0x4b, 0x83, 0xfb, 0xd1, 0x72, 0x56, 0x7e, 0x34, 0xaa, 0x71, + 0xc7, 0x0c, 0x7b, 0xd1, 0xa1, 0x61, 0xef, 0x2d, 0x54, 0xf1, 0xd8, 0x9b, 0xe4, 0x2f, 0xbc, 0xd2, + 0xf7, 0x0b, 0xd7, 0xc2, 0xde, 0xa0, 0x92, 0x52, 0x3c, 0xd1, 0xf8, 0x09, 0x7a, 0xa2, 0x79, 0x54, + 0x6c, 0xb9, 0x4e, 0xb7, 0xc3, 0x2f, 0x35, 0x08, 0x25, 0xbf, 0xca, 0x5a, 0x40, 
0x40, 0x06, 0x73, + 0x06, 0xdf, 0x29, 0xa3, 0xa9, 0xd8, 0x46, 0x68, 0x6a, 0x22, 0x2c, 0x77, 0xca, 0x89, 0xb0, 0x8b, + 0x28, 0xef, 0xd3, 0xa8, 0x66, 0x24, 0x7a, 0x6d, 0x9e, 0x85, 0x33, 0x0c, 0x92, 0xac, 0xce, 0x31, + 0x7a, 0xf4, 0xea, 0x1c, 0xf8, 0x97, 0x50, 0x59, 0x6f, 0x36, 0x5d, 0xe2, 0x79, 0x24, 0x28, 0xf7, + 0xc3, 0x26, 0xa5, 0x5a, 0xd0, 0x08, 0x21, 0x9c, 0xad, 0xa4, 0x9b, 0x3b, 0xde, 0x4d, 0x4f, 0xa4, + 0xb7, 0xd4, 0x95, 0xf4, 0xf2, 0x8a, 0x46, 0xdb, 0x41, 0x62, 0xe0, 0x26, 0x9a, 0xda, 0x73, 0xb7, + 0x97, 0x96, 0x74, 0x63, 0x97, 0x1c, 0x27, 0x25, 0xc2, 0xca, 0xf8, 0x5e, 0x8f, 0x52, 0x80, 0x38, + 0x49, 0xc1, 0xe5, 0x3a, 0x39, 0xf0, 0xf5, 0xed, 0xe3, 0x04, 0xad, 0x01, 0x17, 0x95, 0x02, 0xc4, + 0x49, 0xd2, 0x10, 0x73, 0xcf, 0xdd, 0x0e, 0xae, 0xdc, 0x8b, 0xb2, 0x61, 0x32, 0xc4, 0xbc, 0x1e, + 0x82, 0x40, 0xc5, 0xa3, 0x03, 0xb6, 0xe7, 0x6e, 0x03, 0xd1, 0xad, 0xb6, 0xa8, 0x7c, 0x28, 0x07, + 0xec, 0xba, 0x68, 0x07, 0x89, 0x81, 0x3b, 0x08, 0xd3, 0xa7, 0x63, 0xef, 0x5d, 0xde, 0x19, 0x16, + 0xab, 0xd2, 0x67, 0xd2, 0x9e, 0x46, 0x22, 0xa9, 0x0f, 0x74, 0x8e, 0xba, 0xbb, 0xeb, 0x09, 0x3a, + 0x90, 0x42, 0x1b, 0xbf, 0x81, 0x9e, 0xd8, 0x73, 0xb7, 0xc5, 0xbe, 0x44, 0xc3, 0x35, 0x6d, 0xc3, + 0xec, 0xe8, 0xbc, 0x88, 0x41, 0x25, 0x5a, 0xa8, 0xf1, 0x7a, 0x3a, 0x1a, 0xf4, 0xea, 0x1f, 0xcd, + 0xca, 0x8e, 0x67, 0x92, 0x95, 0x8d, 0x99, 0xeb, 0xa3, 0x5e, 0x8d, 0x67, 0x30, 0xff, 0xf4, 0xfd, + 0x1c, 0xc2, 0xec, 0x08, 0x58, 0xf0, 0xb9, 0x12, 0xe6, 0xfc, 0xf0, 0x22, 0x2a, 0x33, 0xef, 0xa7, + 0xdc, 0xca, 0x95, 0xe9, 0x8d, 0xab, 0x01, 0x00, 0x42, 0x1c, 0xba, 0x88, 0x72, 0xac, 0x26, 0x91, + 0xa5, 0x34, 0xe4, 0x22, 0xea, 0x06, 0x6b, 0x05, 0x01, 0xc5, 0x57, 0xd1, 0x8c, 0x4b, 0xb6, 0x75, + 0x4b, 0xb7, 0x0d, 0xa2, 0xf9, 0xae, 0xee, 0x93, 0xd6, 0x81, 0xf0, 0x24, 0x32, 0x44, 0x83, 0x38, + 0x02, 0x24, 0xfb, 0xcc, 0xff, 0x5b, 0x09, 0x4d, 0xc7, 0xcf, 0xae, 0x3d, 0x2c, 0x99, 0xbc, 0x88, + 0xca, 0x1d, 0xdd, 0xf5, 0x4d, 0xa5, 0xd0, 0x88, 0x7c, 0xaa, 0x46, 0x00, 0x80, 0x10, 0x07, 0x3f, + 0x8d, 0x0a, 0xac, 0x8e, 0xac, 0x90, 0x50, 0xe6, 0x25, 0x58, 0x9d, 0x59, 0xe0, 0xb0, 0xf4, 0xea, + 0x15, 0xf9, 0x13, 0xab, 0x5e, 0xf1, 0x48, 0x14, 0xa6, 0x7d, 0x2f, 0x99, 0xc7, 0x7b, 0x2b, 0xe3, + 0x83, 0x89, 0xfd, 0xad, 0x0b, 0x27, 0x0c, 0x55, 0x9f, 0x45, 0xb5, 0x8e, 0xcd, 0x2c, 0x44, 0x8a, + 0x18, 0x0a, 0x5f, 0xde, 0x45, 0x9a, 0x20, 0xca, 0x1a, 0x37, 0xd0, 0x59, 0xcb, 0x6c, 0x8b, 0x8c, + 0xa4, 0xd7, 0x20, 0x2e, 0x2f, 0xdf, 0xcc, 0x1c, 0xf5, 0x68, 0x98, 0xa9, 0x59, 0x4b, 0xc1, 0x81, + 0xd4, 0x9e, 0xf8, 0x59, 0x34, 0x76, 0x9b, 0xb8, 0xac, 0xba, 0x00, 0x8a, 0x96, 0x94, 0x7f, 0x8d, + 0x37, 0x43, 0x00, 0xc7, 0x6f, 0xa0, 0xbc, 0xa7, 0x7b, 0x96, 0x08, 0xd4, 0x8e, 0x71, 0xd6, 0xba, + 0xa6, 0xad, 0x09, 0xf5, 0x60, 0x39, 0x64, 0xfa, 0x1b, 0x18, 0xc9, 0x53, 0x0a, 0xd8, 0xc2, 0xfd, + 0xa0, 0x89, 0xc3, 0xf6, 0x83, 0x06, 0x73, 0x8a, 0xdf, 0x2d, 0xa2, 0xa9, 0xd8, 0x61, 0xd4, 0x87, + 0xb9, 0x16, 0xe9, 0x29, 0x46, 0x0e, 0xf1, 0x14, 0xcf, 0xa1, 0x92, 0x61, 0x99, 0xc4, 0xf6, 0x57, + 0x9b, 0xc2, 0xa3, 0x84, 0x77, 0xde, 0x79, 0xfb, 0x32, 0x48, 0x8c, 0xd3, 0xf6, 0x2b, 0xaa, 0x03, + 0x28, 0x1c, 0xb5, 0x2a, 0x4e, 0x71, 0x98, 0x5f, 0x27, 0xca, 0xe6, 0xee, 0x7d, 0xec, 0xc5, 0x3e, + 0xf2, 0x55, 0xae, 0x83, 0x5d, 0xa0, 0x72, 0xd6, 0xbb, 0x40, 0x83, 0xd9, 0xc8, 0x3f, 0x8d, 0xa0, + 0xd2, 0x46, 0x6d, 0x4b, 0x63, 0xd5, 0x9f, 0xdf, 0x8c, 0xd6, 0xb7, 0x1e, 0x44, 0xc8, 0x64, 0x21, + 0xeb, 0x15, 0x6a, 0x5a, 0x7d, 0xd7, 0xb0, 0x2e, 0x73, 0xeb, 0xa3, 0xeb, 0x4c, 0xde, 0x1d, 0x2f, + 0xa1, 0xbc, 0xbd, 0xd7, 0xef, 0x47, 0x3e, 0xd8, 0x98, 0x6d, 0x5c, 0x27, 0x07, 0xc0, 0x3a, 0xe3, + 0x9b, 
0x08, 0x19, 0x2e, 0x69, 0x12, 0xdb, 0x37, 0xc5, 0x37, 0xd6, 0xfa, 0xdb, 0x00, 0x59, 0x92, + 0x9d, 0x41, 0x21, 0x34, 0xff, 0x97, 0x45, 0x34, 0x1d, 0x3f, 0x74, 0xfe, 0x30, 0x97, 0xf3, 0x2c, + 0x1a, 0xf3, 0xba, 0xac, 0x02, 0x8f, 0x70, 0x3a, 0x72, 0x1a, 0xd0, 0x78, 0x33, 0x04, 0xf0, 0x74, + 0x57, 0x32, 0x7a, 0x2a, 0xae, 0x24, 0x7f, 0x54, 0x57, 0x92, 0x75, 0x40, 0xf3, 0x5e, 0xf2, 0xfb, + 0x15, 0x6f, 0x65, 0x7c, 0x4d, 0xa0, 0x0f, 0x5f, 0x42, 0x84, 0x55, 0x8f, 0x65, 0x52, 0xbb, 0x26, + 0x30, 0xc4, 0xc4, 0x46, 0xef, 0xe9, 0xb8, 0xac, 0x39, 0x54, 0x60, 0xdf, 0x6b, 0x10, 0x8b, 0x51, + 0x66, 0x8a, 0xec, 0xcc, 0x17, 0xf0, 0xf6, 0x01, 0xcb, 0xeb, 0x17, 0xd0, 0x64, 0xf4, 0x98, 0x29, + 0x5d, 0x37, 0xef, 0x3a, 0x9e, 0x2f, 0xb2, 0x09, 0xf1, 0x2f, 0x31, 0x5e, 0x0b, 0x41, 0xa0, 0xe2, + 0x1d, 0x6d, 0xd2, 0x7e, 0x16, 0x8d, 0x89, 0x6a, 0x7a, 0x62, 0xce, 0x96, 0x66, 0x26, 0x2a, 0xee, + 0x41, 0x00, 0xff, 0xbf, 0x19, 0xdb, 0xf2, 0xf0, 0xd7, 0x93, 0x33, 0xf6, 0x9b, 0x99, 0x9e, 0x29, + 0x7e, 0xd4, 0x27, 0xec, 0xc1, 0x94, 0xfb, 0x0d, 0x34, 0x93, 0xd8, 0x7e, 0x3a, 0x5a, 0xb5, 0xf2, + 0x39, 0x54, 0xb0, 0xf5, 0x36, 0xe1, 0x05, 0xbd, 0x84, 0xd1, 0xb1, 0x0f, 0x5a, 0x00, 0x6f, 0x9f, + 0xf7, 0xd0, 0xb9, 0xf4, 0xcd, 0x11, 0xf6, 0x21, 0x0d, 0xe2, 0x1b, 0xbb, 0x0d, 0xb8, 0x61, 0x37, + 0x60, 0xc9, 0x69, 0x53, 0x50, 0xad, 0xd9, 0x94, 0xd5, 0x3b, 0xc3, 0x0f, 0x69, 0xa4, 0xa3, 0x41, + 0xaf, 0xfe, 0xf3, 0xdf, 0x2b, 0xa2, 0x99, 0xc4, 0x85, 0x1d, 0xb6, 0x10, 0x97, 0xdb, 0x12, 0xb1, + 0xf4, 0x42, 0xea, 0x66, 0xc4, 0x2b, 0x68, 0x92, 0x59, 0x63, 0x23, 0xb6, 0x99, 0x21, 0xf7, 0xfe, + 0xb7, 0x22, 0x50, 0x88, 0x61, 0x1f, 0x6d, 0x21, 0xff, 0x0a, 0x9a, 0x54, 0x3f, 0xfb, 0xb2, 0xba, + 0x2c, 0x76, 0xd3, 0x25, 0x13, 0x2d, 0x02, 0x85, 0x18, 0x36, 0xfb, 0x66, 0x8e, 0x9c, 0xd2, 0x45, + 0x92, 0xb0, 0xd0, 0xff, 0x37, 0x73, 0x62, 0x24, 0x20, 0x41, 0x14, 0x6f, 0xa3, 0x59, 0xbe, 0xa9, + 0xa0, 0x0a, 0x14, 0x3b, 0x89, 0x33, 0x2f, 0x84, 0x9e, 0x5d, 0xee, 0x89, 0x09, 0x87, 0x50, 0xe9, + 0xb3, 0x28, 0xe6, 0xfb, 0xc9, 0xaf, 0x88, 0xbe, 0x9d, 0xf5, 0x35, 0xaf, 0x63, 0x19, 0x7e, 0xf9, + 0xa3, 0x62, 0xf8, 0xdf, 0xab, 0x50, 0x43, 0x89, 0xdd, 0x58, 0xc0, 0xf3, 0xa8, 0xc8, 0x74, 0x93, + 0xce, 0x69, 0x72, 0x7f, 0x82, 0x29, 0xad, 0x07, 0x02, 0x72, 0x84, 0xd4, 0xbd, 0x08, 0x24, 0x47, + 0x7b, 0x04, 0x92, 0x1d, 0x74, 0xc6, 0xb7, 0xbc, 0x2d, 0xb7, 0xeb, 0xf9, 0x4b, 0xc4, 0xf5, 0x3d, + 0xa1, 0xba, 0xf9, 0xbe, 0x3f, 0xbd, 0xb7, 0xb5, 0xa6, 0xc5, 0xa9, 0x40, 0x1a, 0x69, 0xaa, 0xc0, + 0xbe, 0xe5, 0xd5, 0x2c, 0xcb, 0xb9, 0x13, 0x1c, 0xc8, 0x08, 0x67, 0x38, 0x31, 0x77, 0x49, 0x05, + 0xde, 0x5a, 0xd3, 0x7a, 0x60, 0xc2, 0x21, 0x54, 0xf0, 0x3a, 0x7b, 0xaa, 0xd7, 0x74, 0xcb, 0x6c, + 0xea, 0x3e, 0xa1, 0x31, 0x00, 0xcb, 0xa9, 0x73, 0xeb, 0x90, 0x9b, 0xa0, 0x5b, 0x6b, 0x5a, 0x1c, + 0x05, 0xd2, 0xfa, 0x0d, 0xeb, 0xf3, 0xbb, 0xa9, 0x21, 0x43, 0xe9, 0x54, 0x42, 0x86, 0x72, 0x7f, + 0x56, 0x8e, 0x32, 0xb2, 0xf2, 0x98, 0xca, 0xf7, 0x61, 0xe5, 0x4d, 0x34, 0x25, 0xbf, 0x4b, 0x24, + 0x74, 0xb6, 0xd2, 0xf7, 0x9e, 0x4c, 0x2d, 0x4a, 0x01, 0xe2, 0x24, 0x4f, 0x29, 0xcf, 0xf5, 0xd7, + 0x39, 0x34, 0x4d, 0x25, 0xa9, 0xf9, 0xbb, 0xc4, 0xbe, 0xd7, 0xd0, 0x5d, 0xbd, 0x1d, 0x14, 0x5e, + 0xdb, 0xc9, 0x7c, 0xc8, 0x6b, 0x31, 0x46, 0x7c, 0xe8, 0x65, 0x35, 0xec, 0x38, 0x18, 0x12, 0x92, + 0xd1, 0xa9, 0x2f, 0x6c, 0x3b, 0xce, 0x37, 0x74, 0xcf, 0x46, 0x19, 0x05, 0x53, 0x5f, 0x9c, 0xe8, + 0x40, 0x3e, 0x76, 0x76, 0x09, 0x3d, 0x9e, 0xfa, 0xa8, 0x7d, 0x39, 0xea, 0xaf, 0x16, 0xc5, 0xad, + 0xa3, 0x0c, 0x16, 0x20, 0x59, 0x7f, 0xe4, 0x8a, 0x06, 0x56, 0xb6, 0xfc, 0x08, 0x5a, 0xec, 0xe3, + 0x78, 0xe1, 0x67, 0xcf, 0x42, 
0x1c, 0x3c, 0x8b, 0x46, 0x9a, 0xdb, 0xcc, 0xd5, 0x17, 0xc2, 0xe3, + 0x8f, 0xcb, 0x75, 0x18, 0x69, 0x6e, 0xe3, 0x67, 0x50, 0x49, 0xac, 0x6c, 0x82, 0xd3, 0x81, 0x8c, + 0xad, 0x58, 0xf6, 0x78, 0x20, 0xa1, 0xc3, 0x5a, 0x4b, 0x0c, 0x61, 0x57, 0x21, 0xfe, 0xe6, 0x1e, + 0xf9, 0xf4, 0x5f, 0x7f, 0x1e, 0xfa, 0x39, 0xa5, 0xd6, 0x3b, 0x8a, 0x66, 0x98, 0x93, 0x85, 0xdc, + 0x07, 0x0b, 0x58, 0xfe, 0xae, 0x88, 0xce, 0xa5, 0xdf, 0x85, 0x7b, 0x64, 0xac, 0x81, 0x2b, 0xf7, + 0x68, 0xaa, 0x72, 0x7f, 0x1c, 0x8d, 0x79, 0x4c, 0xf0, 0xe0, 0x3c, 0x02, 0xaf, 0xc2, 0xcb, 0x9b, + 0x20, 0x80, 0xe1, 0x57, 0x11, 0x6e, 0xeb, 0x77, 0xd7, 0xbd, 0xd6, 0x92, 0xd3, 0x65, 0x85, 0xc5, + 0x81, 0xe8, 0xbc, 0xea, 0x7d, 0x21, 0x3c, 0xf5, 0xb3, 0x9e, 0xc0, 0x80, 0x94, 0x5e, 0xec, 0x04, + 0x45, 0x64, 0x57, 0x2a, 0x76, 0xfc, 0xe8, 0xd0, 0x6d, 0xa4, 0x21, 0xc5, 0x1f, 0x1f, 0x26, 0x03, + 0x77, 0x63, 0x28, 0x17, 0x24, 0x1f, 0xf5, 0xe8, 0xfd, 0x24, 0x4d, 0xe7, 0x6b, 0x05, 0x74, 0x26, + 0xa5, 0x40, 0x4e, 0xd4, 0x7b, 0xe7, 0x8e, 0xe0, 0xbd, 0xf7, 0xe5, 0x48, 0x65, 0x73, 0x3e, 0x3d, + 0x10, 0xea, 0x90, 0x61, 0x7a, 0x3f, 0x87, 0xce, 0xb2, 0x6d, 0xff, 0x60, 0xaf, 0x31, 0xa8, 0x8c, + 0x3c, 0x2a, 0x34, 0xf3, 0x48, 0x25, 0xca, 0xaf, 0xa6, 0x50, 0x08, 0xf7, 0x42, 0xd3, 0xa0, 0x90, + 0xca, 0x15, 0x2f, 0x21, 0x24, 0x6f, 0x18, 0x06, 0x96, 0xfc, 0x34, 0x2b, 0xb4, 0x2e, 0x5b, 0xff, + 0x87, 0x1d, 0x29, 0x50, 0x46, 0x9b, 0xad, 0x8c, 0x94, 0x6e, 0xc3, 0xf8, 0x1c, 0x4d, 0xca, 0xeb, + 0xed, 0xc3, 0x02, 0x9e, 0x45, 0x63, 0x86, 0xd5, 0xf5, 0x7c, 0xf9, 0xc1, 0xc2, 0x30, 0xe7, 0xc8, + 0x9b, 0x21, 0x80, 0x0f, 0xa6, 0x88, 0x7f, 0x35, 0x8a, 0x26, 0xa3, 0xef, 0x1c, 0x5f, 0x42, 0xc5, + 0x8e, 0x4b, 0x76, 0xcc, 0xbb, 0xf1, 0x0f, 0x98, 0x34, 0x58, 0x2b, 0x08, 0x28, 0x76, 0x50, 0xd1, + 0xd2, 0xb7, 0x69, 0x68, 0xc0, 0x0b, 0xc8, 0x5f, 0x1d, 0xb8, 0x18, 0x7a, 0xb0, 0x4d, 0x12, 0x30, + 0x5c, 0x63, 0xe4, 0x41, 0xb0, 0xa1, 0x0c, 0x77, 0x4c, 0x62, 0x35, 0xf9, 0x79, 0xd4, 0x61, 0x30, + 0x5c, 0x61, 0xe4, 0x41, 0xb0, 0xc1, 0x6f, 0xa2, 0x32, 0xff, 0xea, 0x4b, 0xb3, 0x7e, 0x20, 0x16, + 0xc3, 0xff, 0xff, 0x68, 0xda, 0xbd, 0x65, 0xb6, 0x49, 0x68, 0xb9, 0x4b, 0x01, 0x11, 0x08, 0xe9, + 0xb1, 0x8f, 0xfd, 0xef, 0xf8, 0xc4, 0xd5, 0x7c, 0xdd, 0x0d, 0xbe, 0xc5, 0x1f, 0x7e, 0xec, 0x5f, + 0x42, 0x40, 0xc1, 0x9a, 0xff, 0xdb, 0x31, 0x34, 0x15, 0xbb, 0xa8, 0xfc, 0x8b, 0x71, 0x0b, 0x57, + 0xfd, 0x42, 0xcd, 0x68, 0xd6, 0x5f, 0xa8, 0xc9, 0x67, 0x11, 0x49, 0xbc, 0x89, 0xc6, 0x3d, 0x6f, + 0x97, 0x61, 0xf6, 0x9f, 0xd6, 0x9b, 0x7e, 0x70, 0x7f, 0x6e, 0x5c, 0xd3, 0xae, 0xc9, 0xee, 0x10, + 0x21, 0x86, 0xd7, 0xd0, 0x98, 0x38, 0xfc, 0xd8, 0xdf, 0xc9, 0x45, 0x16, 0xb1, 0x04, 0x91, 0x54, + 0x40, 0x62, 0x18, 0x5b, 0xe6, 0x31, 0xa5, 0x7b, 0xe4, 0x63, 0xe6, 0x06, 0x3a, 0xdb, 0x71, 0x2c, + 0x2b, 0x38, 0x7d, 0x2a, 0xbf, 0x2d, 0x55, 0x8e, 0x5e, 0x8e, 0x6a, 0xa4, 0xe0, 0x40, 0x6a, 0xcf, + 0xc1, 0xbc, 0xec, 0xcf, 0x8a, 0x68, 0x32, 0x5a, 0xc7, 0xeb, 0xf4, 0xae, 0xa8, 0xb2, 0x9c, 0x61, + 0xcd, 0xb5, 0xe3, 0x57, 0x54, 0xb7, 0x44, 0x3b, 0x48, 0x0c, 0x0c, 0xa8, 0xcc, 0x4f, 0xe4, 0x5f, + 0xef, 0x77, 0xd3, 0x9c, 0x1f, 0xed, 0x0d, 0xfa, 0x42, 0x48, 0x86, 0xd2, 0xf4, 0x02, 0xf4, 0xfe, + 0x2c, 0x93, 0xd1, 0x94, 0xcd, 0x10, 0x92, 0xa1, 0x33, 0x96, 0x4b, 0x5a, 0x41, 0xe2, 0x50, 0x99, + 0xb1, 0x80, 0xb5, 0x82, 0x80, 0xd2, 0x49, 0xd5, 0x75, 0x2c, 0x52, 0x83, 0x8d, 0xf8, 0xa4, 0x0a, + 0xbc, 0x19, 0x02, 0xf8, 0x30, 0x36, 0xb1, 0xa2, 0x0a, 0xd0, 0x87, 0x09, 0x5d, 0x45, 0x33, 0xb7, + 0x45, 0x32, 0x52, 0x33, 0x5b, 0xb6, 0xee, 0x87, 0xb7, 0xda, 0xe4, 0x89, 0xc9, 0xd7, 0xe2, 0x08, + 0x90, 0xec, 0x73, 0x7a, 0x61, 0x35, 0xb1, 0x9b, 0x1d, 
0xc7, 0xb4, 0xfd, 0x78, 0x58, 0x7d, 0x45, + 0xb4, 0x83, 0xc4, 0x18, 0xcc, 0xce, 0xfe, 0x71, 0x0c, 0x4d, 0x46, 0xeb, 0xd4, 0x45, 0x75, 0x38, + 0x37, 0x04, 0x1d, 0x1e, 0xc9, 0x5a, 0x87, 0x47, 0x0f, 0xd5, 0xe1, 0xa7, 0x83, 0x9d, 0xf5, 0x7c, + 0x74, 0x1f, 0x4b, 0xdd, 0x5d, 0xc7, 0x35, 0x3a, 0xc3, 0x9b, 0x3e, 0x8d, 0x42, 0xf8, 0x89, 0x41, + 0x7e, 0x98, 0x62, 0x54, 0x9d, 0x91, 0x23, 0x60, 0x88, 0xe3, 0xf7, 0x63, 0x2b, 0xfd, 0x6d, 0x14, + 0xbd, 0x82, 0x26, 0x99, 0x90, 0x35, 0xc3, 0xa0, 0x4b, 0xe3, 0xd5, 0xa6, 0x38, 0xe4, 0x2e, 0xf7, + 0xd8, 0x36, 0x55, 0xe8, 0x32, 0xc4, 0xb0, 0xa3, 0x96, 0x59, 0xce, 0xc6, 0x32, 0x37, 0x8f, 0x69, + 0x99, 0xe7, 0xd1, 0x68, 0xd3, 0xda, 0x67, 0x5a, 0x5d, 0x0a, 0xb7, 0x55, 0x96, 0xd7, 0x36, 0x81, + 0xb6, 0x2b, 0xf6, 0x56, 0x39, 0x25, 0x7b, 0x1b, 0x7f, 0x98, 0xbd, 0xb1, 0xb8, 0x86, 0x7f, 0x82, + 0x8a, 0x5f, 0xe8, 0x99, 0xe8, 0x3f, 0xae, 0x51, 0xba, 0x43, 0x84, 0xd8, 0x60, 0xc6, 0xfc, 0x25, + 0x54, 0x0a, 0x18, 0xd1, 0x81, 0x96, 0xfd, 0xc2, 0x81, 0xa6, 0x26, 0xc4, 0x88, 0x2c, 0xa2, 0xb2, + 0xd3, 0x21, 0x91, 0xef, 0x47, 0xca, 0x18, 0xf8, 0x46, 0x00, 0x80, 0x10, 0x87, 0x5a, 0x11, 0xe7, + 0x1a, 0xdb, 0x0d, 0x7e, 0x8d, 0x36, 0x0a, 0x21, 0xe6, 0xbf, 0x9c, 0x43, 0xc1, 0x47, 0x99, 0xf0, + 0x32, 0x2a, 0x74, 0x1c, 0xd7, 0xe7, 0xbb, 0x70, 0x95, 0xcb, 0x73, 0xe9, 0xe3, 0xc3, 0xaf, 0x27, + 0x38, 0xae, 0x1f, 0x52, 0xa4, 0xbf, 0x3c, 0xe0, 0x9d, 0xa9, 0x9c, 0x62, 0xd5, 0xb6, 0xda, 0x88, + 0xcb, 0xb9, 0x14, 0x00, 0x20, 0xc4, 0x99, 0xff, 0xaf, 0x3c, 0x9a, 0x8e, 0x97, 0x2e, 0xc4, 0x6f, + 0xa3, 0x09, 0xcf, 0x6c, 0xd9, 0xa6, 0xdd, 0x12, 0xb1, 0x68, 0xae, 0xef, 0xcb, 0xd3, 0x9a, 0xda, + 0x1f, 0xa2, 0xe4, 0x32, 0x3b, 0x6e, 0xa7, 0x84, 0x38, 0xa3, 0x27, 0x17, 0xe2, 0xbc, 0x97, 0xac, + 0xd2, 0xf3, 0x56, 0xc6, 0xc5, 0x23, 0x7f, 0xb1, 0xcb, 0xf4, 0xfc, 0xbc, 0x80, 0xce, 0xa5, 0x17, + 0xa7, 0x3c, 0xa5, 0xa0, 0x35, 0xbc, 0x87, 0x3a, 0xd2, 0xf3, 0x1e, 0x6a, 0x38, 0xce, 0xa3, 0x19, + 0x15, 0x9b, 0x94, 0x03, 0x70, 0xb8, 0xab, 0x95, 0xe1, 0x74, 0xfe, 0xa1, 0xe1, 0xf4, 0x25, 0x54, + 0x14, 0x1f, 0x26, 0x88, 0x85, 0xa9, 0x75, 0xfe, 0xd9, 0x00, 0x01, 0x55, 0x42, 0x81, 0xe2, 0xa1, + 0xa1, 0x00, 0x0d, 0x6d, 0x82, 0xad, 0xca, 0xfe, 0xee, 0xa2, 0xf1, 0xd0, 0x26, 0xe8, 0x0b, 0x21, + 0x19, 0x56, 0x0a, 0xa1, 0x63, 0xde, 0x84, 0x35, 0x31, 0x2b, 0x87, 0xa5, 0x10, 0x1a, 0xab, 0x37, + 0x61, 0x0d, 0x04, 0x34, 0x9a, 0x35, 0x2e, 0x67, 0x92, 0x35, 0x4e, 0xd7, 0xb9, 0xa3, 0xdb, 0xda, + 0x60, 0x5a, 0x6f, 0xa0, 0x99, 0xc4, 0x3b, 0x3f, 0x72, 0x2a, 0xec, 0x12, 0x2a, 0x7a, 0xdd, 0x1d, + 0x8a, 0x17, 0xab, 0x51, 0xa5, 0xb1, 0x56, 0x10, 0xd0, 0xf9, 0x6f, 0xe5, 0x29, 0x97, 0x58, 0x19, + 0xd3, 0x53, 0xb2, 0xaa, 0x97, 0xd1, 0x04, 0x4f, 0x46, 0xbd, 0xae, 0x14, 0x38, 0x29, 0x29, 0x7b, + 0x11, 0x2a, 0x10, 0xa2, 0xb8, 0x78, 0x95, 0xa9, 0x49, 0xdf, 0xcb, 0x42, 0x24, 0x34, 0x89, 0x4e, + 0xdc, 0x82, 0x00, 0x7e, 0x01, 0x55, 0xd8, 0x43, 0xf0, 0x21, 0x17, 0x09, 0x5c, 0x76, 0x53, 0xf8, + 0x4a, 0xd8, 0x0c, 0x2a, 0x4e, 0xf4, 0x14, 0x42, 0x21, 0x93, 0x53, 0x08, 0x89, 0xb7, 0x72, 0x52, + 0x7a, 0xf7, 0x8d, 0x12, 0x92, 0x9f, 0x9a, 0xc4, 0x46, 0xe2, 0x83, 0x9f, 0x9f, 0xea, 0x7b, 0x9f, + 0x27, 0x10, 0x85, 0x67, 0xb2, 0x52, 0xa6, 0xa4, 0x57, 0x11, 0x16, 0x5f, 0x98, 0x14, 0x41, 0xb5, + 0x52, 0xb0, 0x4a, 0x6e, 0x68, 0x69, 0x09, 0x0c, 0x48, 0xe9, 0x85, 0x5f, 0x65, 0x9f, 0xb7, 0xf5, + 0x75, 0xd3, 0x96, 0x9e, 0xf7, 0x7c, 0x8f, 0x0b, 0xa4, 0x1c, 0x49, 0x7e, 0xa8, 0x96, 0xff, 0x84, + 0xb0, 0x3b, 0xbe, 0x82, 0xc6, 0x6e, 0x3b, 0x56, 0xb7, 0x2d, 0xb2, 0xf8, 0x95, 0xcb, 0xb3, 0x69, + 0x94, 0x5e, 0x63, 0x28, 0xca, 0x85, 0x27, 0xde, 0x05, 0x82, 0xbe, 0x98, 0xa0, 
0x29, 0x76, 0x12, + 0xc8, 0xf4, 0x0f, 0x84, 0x01, 0x88, 0xa9, 0xf7, 0x52, 0x1a, 0xb9, 0x86, 0xd3, 0xd4, 0xa2, 0xd8, + 0xfc, 0x50, 0x48, 0xac, 0x11, 0xe2, 0x34, 0xf1, 0x0a, 0x2a, 0xe9, 0x3b, 0x3b, 0xa6, 0x6d, 0xfa, + 0x07, 0x22, 0x67, 0xf7, 0x54, 0x1a, 0xfd, 0x9a, 0xc0, 0x11, 0x95, 0x70, 0xc4, 0x2f, 0x90, 0x7d, + 0xf1, 0x4d, 0x54, 0xf1, 0x1d, 0x4b, 0xc4, 0xa5, 0x9e, 0x48, 0x35, 0x5c, 0x48, 0x23, 0xb5, 0x25, + 0xd1, 0xc2, 0x9d, 0xd4, 0xb0, 0xcd, 0x03, 0x95, 0x0e, 0xfe, 0x9d, 0x1c, 0x1a, 0xb7, 0x9d, 0x26, + 0x09, 0x4c, 0x4f, 0xec, 0xec, 0xbd, 0x91, 0xd1, 0x27, 0x52, 0x17, 0x36, 0x14, 0xda, 0xdc, 0x42, + 0x64, 0x85, 0x14, 0x15, 0x04, 0x11, 0x21, 0xb0, 0x8d, 0xa6, 0xcd, 0xb6, 0xde, 0x22, 0x8d, 0xae, + 0x25, 0x4e, 0x32, 0x7a, 0x62, 0xf2, 0x48, 0xbd, 0x76, 0xbc, 0xe6, 0x18, 0xba, 0xc5, 0x3f, 0x31, + 0x0c, 0x64, 0x87, 0xb8, 0xec, 0x4b, 0xc7, 0xf2, 0x50, 0xca, 0x6a, 0x8c, 0x12, 0x24, 0x68, 0xe3, + 0xab, 0x68, 0xa6, 0xe3, 0x9a, 0x0e, 0x7b, 0x6f, 0x96, 0xee, 0xf1, 0x4f, 0xcc, 0xa2, 0xe8, 0x5d, + 0xd3, 0x46, 0x1c, 0x01, 0x92, 0x7d, 0x78, 0x7d, 0x04, 0xde, 0xc8, 0xd6, 0x72, 0x85, 0xa0, 0x3e, + 0x02, 0x6f, 0x03, 0x09, 0x9d, 0xfd, 0x0c, 0x9a, 0x49, 0x8c, 0x4d, 0x5f, 0x0e, 0xe1, 0x0f, 0x72, + 0x28, 0x9e, 0x2f, 0xa7, 0xeb, 0x86, 0xa6, 0xe9, 0x32, 0x82, 0x07, 0xf1, 0x1c, 0xff, 0x72, 0x00, + 0x80, 0x10, 0x07, 0x5f, 0x44, 0xf9, 0x8e, 0xee, 0xef, 0xc6, 0x4f, 0x04, 0x52, 0x92, 0xc0, 0x20, + 0xf8, 0x32, 0x42, 0xf4, 0x2f, 0x90, 0x16, 0xb9, 0xdb, 0x11, 0xcb, 0x20, 0xb9, 0xfd, 0xd0, 0x90, + 0x10, 0x50, 0xb0, 0xe6, 0xff, 0xb5, 0x80, 0x26, 0xa3, 0x73, 0x4b, 0x64, 0xb1, 0x99, 0x7b, 0xe8, + 0x62, 0xf3, 0x12, 0x2a, 0xb6, 0x89, 0xbf, 0xeb, 0x34, 0xe3, 0xf3, 0xe4, 0x3a, 0x6b, 0x05, 0x01, + 0x65, 0xe2, 0x3b, 0xae, 0x2f, 0xc4, 0x0a, 0xc5, 0x77, 0x5c, 0x1f, 0x18, 0x24, 0x38, 0xd0, 0x98, + 0xef, 0x71, 0xa0, 0xb1, 0x85, 0xa6, 0x79, 0x09, 0xe5, 0x25, 0xe2, 0xfa, 0xc7, 0x3e, 0x88, 0xab, + 0xc5, 0x48, 0x40, 0x82, 0x28, 0x6e, 0x52, 0x6f, 0x43, 0xdb, 0xc2, 0x9d, 0x81, 0xfe, 0x6b, 0x0f, + 0x68, 0x51, 0x0a, 0x10, 0x27, 0x39, 0x8c, 0x6c, 0x64, 0xf4, 0x3d, 0x1e, 0xbb, 0xf6, 0x64, 0x29, + 0xab, 0xda, 0x93, 0x2f, 0xa1, 0xc9, 0xb6, 0x7e, 0x57, 0x1c, 0x4f, 0xd7, 0xcc, 0x7b, 0x44, 0x5c, + 0x8f, 0xc5, 0x0f, 0xee, 0xcf, 0x4d, 0xae, 0x47, 0x20, 0x10, 0xc3, 0x1c, 0x6c, 0x02, 0xfe, 0xc3, + 0x11, 0x84, 0x93, 0x9f, 0x86, 0xc1, 0x1f, 0xe4, 0xd0, 0xe4, 0x9d, 0xc8, 0x18, 0x0d, 0x27, 0x38, + 0x93, 0x69, 0xaf, 0x68, 0x3b, 0xc4, 0x98, 0x2b, 0x0b, 0x9c, 0x91, 0x93, 0x5b, 0x48, 0xd6, 0x8d, + 0x1f, 0xfc, 0xf4, 0xc2, 0x63, 0x3f, 0xfc, 0xe9, 0x85, 0xc7, 0x7e, 0xf4, 0xd3, 0x0b, 0x8f, 0x7d, + 0xf9, 0xc1, 0x85, 0xdc, 0x0f, 0x1e, 0x5c, 0xc8, 0xfd, 0xf0, 0xc1, 0x85, 0xdc, 0x8f, 0x1e, 0x5c, + 0xc8, 0xfd, 0xe4, 0xc1, 0x85, 0xdc, 0xb7, 0xfe, 0xe3, 0xc2, 0x63, 0x9f, 0xfd, 0x74, 0x28, 0xca, + 0x62, 0x20, 0x0a, 0xfb, 0xe7, 0x79, 0xce, 0x7a, 0xb1, 0xb3, 0xd7, 0x5a, 0xa4, 0xa2, 0x2c, 0x2a, + 0xa2, 0x2c, 0x06, 0xa2, 0xfc, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa1, 0xf8, 0x83, 0x21, 0x1d, + 0xa7, 0x00, 0x00, } func (m *AMQPConsumeConfig) Marshal() (dAtA []byte, err error) { @@ -1787,6 +2347,30 @@ func (m *AMQPEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Filter != nil { + { + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + if m.URLSecret != nil { + { + size, err := m.URLSecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x72 + } if m.Auth != nil { { size, err := m.Auth.MarshalToSizedBuffer(dAtA[:i]) @@ -2032,6 +2616,11 @@ func (m *AMQPQueueDeclareConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) _ = i var l int _ = l + i -= len(m.Arguments) + copy(dAtA[i:], m.Arguments) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Arguments))) + i-- + dAtA[i] = 0x32 i-- if m.NoWait { dAtA[i] = 1 @@ -2092,6 +2681,18 @@ func (m *AzureEventsHubEventSource) MarshalToSizedBuffer(dAtA []byte) (int, erro _ = i var l int _ = l + if m.Filter != nil { + { + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } if len(m.Metadata) > 0 { keysForMetadata := make([]string, 0, len(m.Metadata)) for k := range m.Metadata { @@ -2153,7 +2754,7 @@ func (m *AzureEventsHubEventSource) MarshalToSizedBuffer(dAtA []byte) (int, erro return len(dAtA) - i, nil } -func (m *CalendarEventSource) Marshal() (dAtA []byte, err error) { +func (m *AzureQueueStorageEventSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2163,19 +2764,32 @@ func (m *CalendarEventSource) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *CalendarEventSource) MarshalTo(dAtA []byte) (int, error) { +func (m *AzureQueueStorageEventSource) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *CalendarEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *AzureQueueStorageEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Persistence != nil { + if m.WaitTimeInSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.WaitTimeInSeconds)) + i-- + dAtA[i] = 0x48 + } + i-- + if m.DecodeMessage { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + if m.Filter != nil { { - size, err := m.Persistence.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -2209,113 +2823,48 @@ func (m *CalendarEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x32 } } - if m.UserPayload != nil { - i -= len(m.UserPayload) - copy(dAtA[i:], m.UserPayload) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserPayload))) - i-- - dAtA[i] = 0x2a - } - i -= len(m.Timezone) - copy(dAtA[i:], m.Timezone) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Timezone))) - i-- - dAtA[i] = 0x22 - if len(m.ExclusionDates) > 0 { - for iNdEx := len(m.ExclusionDates) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ExclusionDates[iNdEx]) - copy(dAtA[i:], m.ExclusionDates[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExclusionDates[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - i -= len(m.Interval) - copy(dAtA[i:], m.Interval) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Interval))) - i-- - dAtA[i] = 0x12 - i -= len(m.Schedule) - copy(dAtA[i:], m.Schedule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schedule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *CatchupConfiguration) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CatchupConfiguration) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return 
m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CatchupConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.MaxDuration) - copy(dAtA[i:], m.MaxDuration) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MaxDuration))) - i-- - dAtA[i] = 0x12 i-- - if m.Enabled { + if m.DLQ { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *ConfigMapPersistence) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ConfigMapPersistence) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ConfigMapPersistence) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l + dAtA[i] = 0x28 i-- - if m.CreateIfNotExist { + if m.JSONBody { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- - dAtA[i] = 0x10 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + dAtA[i] = 0x20 + i -= len(m.QueueName) + copy(dAtA[i:], m.QueueName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.QueueName))) + i-- + dAtA[i] = 0x1a + if m.ConnectionString != nil { + { + size, err := m.ConnectionString.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.StorageAccountName) + copy(dAtA[i:], m.StorageAccountName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageAccountName))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *EmitterEventSource) Marshal() (dAtA []byte, err error) { +func (m *AzureServiceBusEventSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2325,16 +2874,33 @@ func (m *EmitterEventSource) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *EmitterEventSource) MarshalTo(dAtA []byte) (int, error) { +func (m *AzureServiceBusEventSource) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *EmitterEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *AzureServiceBusEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l + i -= len(m.FullyQualifiedNamespace) + copy(dAtA[i:], m.FullyQualifiedNamespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FullyQualifiedNamespace))) + i-- + dAtA[i] = 0x4a + if m.Filter != nil { + { + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } if len(m.Metadata) > 0 { keysForMetadata := make([]string, 0, len(m.Metadata)) for k := range m.Metadata { @@ -2356,9 +2922,17 @@ func (m *EmitterEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x4a + dAtA[i] = 0x3a } } + i-- + if m.JSONBody { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 if m.TLS != nil { { size, err := m.TLS.MarshalToSizedBuffer(dAtA[:i]) @@ -2369,19 +2943,26 @@ func (m *EmitterEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x42 + dAtA[i] = 0x2a } 
+ i -= len(m.SubscriptionName) + copy(dAtA[i:], m.SubscriptionName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubscriptionName))) i-- - if m.JSONBody { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } + dAtA[i] = 0x22 + i -= len(m.TopicName) + copy(dAtA[i:], m.TopicName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopicName))) i-- - dAtA[i] = 0x38 - if m.ConnectionBackoff != nil { + dAtA[i] = 0x1a + i -= len(m.QueueName) + copy(dAtA[i:], m.QueueName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.QueueName))) + i-- + dAtA[i] = 0x12 + if m.ConnectionString != nil { { - size, err := m.ConnectionBackoff.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ConnectionString.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -2389,11 +2970,34 @@ func (m *EmitterEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x32 + dAtA[i] = 0xa } - if m.Password != nil { + return len(dAtA) - i, nil +} + +func (m *BitbucketAuth) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BitbucketAuth) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BitbucketAuth) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.OAuthToken != nil { { - size, err := m.Password.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.OAuthToken.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -2401,11 +3005,11 @@ func (m *EmitterEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x2a + dAtA[i] = 0x12 } - if m.Username != nil { + if m.Basic != nil { { - size, err := m.Username.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Basic.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -2413,27 +3017,12 @@ func (m *EmitterEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x22 + dAtA[i] = 0xa } - i -= len(m.ChannelName) - copy(dAtA[i:], m.ChannelName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ChannelName))) - i-- - dAtA[i] = 0x1a - i -= len(m.ChannelKey) - copy(dAtA[i:], m.ChannelKey) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ChannelKey))) - i-- - dAtA[i] = 0x12 - i -= len(m.Broker) - copy(dAtA[i:], m.Broker) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Broker))) - i-- - dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *EventPersistence) Marshal() (dAtA []byte, err error) { +func (m *BitbucketBasicAuth) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2443,19 +3032,19 @@ func (m *EventPersistence) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *EventPersistence) MarshalTo(dAtA []byte) (int, error) { +func (m *BitbucketBasicAuth) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *EventPersistence) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *BitbucketBasicAuth) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.ConfigMap != nil { + if m.Password != nil { { - size, err := m.ConfigMap.MarshalToSizedBuffer(dAtA[:i]) + size, err := 
m.Password.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -2465,9 +3054,9 @@ func (m *EventPersistence) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } - if m.Catchup != nil { + if m.Username != nil { { - size, err := m.Catchup.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Username.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -2480,7 +3069,7 @@ func (m *EventPersistence) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *EventSource) Marshal() (dAtA []byte, err error) { +func (m *BitbucketEventSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2490,50 +3079,126 @@ func (m *EventSource) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *EventSource) MarshalTo(dAtA []byte) (int, error) { +func (m *BitbucketEventSource) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *EventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *BitbucketEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if m.Filter != nil { + { + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Repositories) > 0 { + for iNdEx := len(m.Repositories) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Repositories[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i -= len(m.DeprecatedRepositorySlug) + copy(dAtA[i:], m.DeprecatedRepositorySlug) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeprecatedRepositorySlug))) i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + dAtA[i] = 0x42 + i -= len(m.DeprecatedProjectKey) + copy(dAtA[i:], m.DeprecatedProjectKey) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeprecatedProjectKey))) + i-- + dAtA[i] = 0x3a + i -= len(m.DeprecatedOwner) + copy(dAtA[i:], m.DeprecatedOwner) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeprecatedOwner))) + i-- + dAtA[i] = 0x32 + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Events[iNdEx]) + copy(dAtA[i:], m.Events[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Events[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if m.Auth != nil { + { + size, err := m.Auth.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Webhook != nil { + { + size, err := m.Webhook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Metadata) > 0 { + keysForMetadata := make([]string, 0, len(m.Metadata)) + for k := range m.Metadata { + keysForMetadata = append(keysForMetadata, string(k)) + } + 
github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + for iNdEx := len(keysForMetadata) - 1; iNdEx >= 0; iNdEx-- { + v := m.Metadata[string(keysForMetadata[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForMetadata[iNdEx]) + copy(dAtA[i:], keysForMetadata[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMetadata[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + if m.DeleteHookOnFinish { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 return len(dAtA) - i, nil } -func (m *EventSourceList) Marshal() (dAtA []byte, err error) { +func (m *BitbucketRepository) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2543,44 +3208,30 @@ func (m *EventSourceList) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *EventSourceList) MarshalTo(dAtA []byte) (int, error) { +func (m *BitbucketRepository) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *EventSourceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *BitbucketRepository) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + i -= len(m.RepositorySlug) + copy(dAtA[i:], m.RepositorySlug) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RepositorySlug))) + i-- + dAtA[i] = 0x12 + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Owner))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *EventSourceSpec) Marshal() (dAtA []byte, err error) { +func (m *BitbucketServerEventSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2590,96 +3241,127 @@ func (m *EventSourceSpec) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *EventSourceSpec) MarshalTo(dAtA []byte) (int, error) { +func (m *BitbucketServerEventSource) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *EventSourceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *BitbucketServerEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Replicas != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i -= len(m.CheckInterval) + copy(dAtA[i:], m.CheckInterval) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CheckInterval))) + i-- + dAtA[i] = 0x7a + if m.TLS != nil { + { + size, err := m.TLS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x1 + dAtA[i] = 0x72 + } + if m.Filter != nil { + { + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size 
+ i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0xe8 + dAtA[i] = 0x6a } - if len(m.Generic) > 0 { - keysForGeneric := make([]string, 0, len(m.Generic)) - for k := range m.Generic { - keysForGeneric = append(keysForGeneric, string(k)) + if len(m.Metadata) > 0 { + keysForMetadata := make([]string, 0, len(m.Metadata)) + for k := range m.Metadata { + keysForMetadata = append(keysForMetadata, string(k)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForGeneric) - for iNdEx := len(keysForGeneric) - 1; iNdEx >= 0; iNdEx-- { - v := m.Generic[string(keysForGeneric[iNdEx])] + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + for iNdEx := len(keysForMetadata) - 1; iNdEx >= 0; iNdEx-- { + v := m.Metadata[string(keysForMetadata[iNdEx])] baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- dAtA[i] = 0x12 - i -= len(keysForGeneric[iNdEx]) - copy(dAtA[i:], keysForGeneric[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForGeneric[iNdEx]))) + i -= len(keysForMetadata[iNdEx]) + copy(dAtA[i:], keysForMetadata[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMetadata[iNdEx]))) i-- dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xe2 + dAtA[i] = 0x62 } } - if len(m.Pulsar) > 0 { - keysForPulsar := make([]string, 0, len(m.Pulsar)) - for k := range m.Pulsar { - keysForPulsar = append(keysForPulsar, string(k)) + i-- + if m.DeleteHookOnFinish { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + i -= len(m.BitbucketServerBaseURL) + copy(dAtA[i:], m.BitbucketServerBaseURL) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.BitbucketServerBaseURL))) + i-- + dAtA[i] = 0x52 + if m.WebhookSecret != nil { + { + size, err := m.WebhookSecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForPulsar) - for iNdEx := len(keysForPulsar) - 1; iNdEx >= 0; iNdEx-- { - v := m.Pulsar[string(keysForPulsar[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if m.AccessToken != nil { + { + size, err := m.AccessToken.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 - i -= len(keysForPulsar[iNdEx]) - copy(dAtA[i:], keysForPulsar[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForPulsar[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xda + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x42 } - if len(m.NSQ) > 0 { - keysForNSQ := make([]string, 0, len(m.NSQ)) - for k := range m.NSQ { - keysForNSQ = append(keysForNSQ, string(k)) + i-- + if m.SkipBranchRefsChangedOnOpenPR { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Events[iNdEx]) + copy(dAtA[i:], m.Events[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Events[iNdEx]))) + i-- + dAtA[i] = 0x32 } - github_com_gogo_protobuf_sortkeys.Strings(keysForNSQ) - for iNdEx := 
len(keysForNSQ) - 1; iNdEx >= 0; iNdEx-- { - v := m.NSQ[string(keysForNSQ[iNdEx])] - baseI := i + } + if len(m.Repositories) > 0 { + for iNdEx := len(m.Repositories) - 1; iNdEx >= 0; iNdEx-- { { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Repositories[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -2687,247 +3369,525 @@ func (m *EventSourceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 - i -= len(keysForNSQ[iNdEx]) - copy(dAtA[i:], keysForNSQ[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNSQ[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1 + dAtA[i] = 0x2a + } + } + if len(m.Projects) > 0 { + for iNdEx := len(m.Projects) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Projects[iNdEx]) + copy(dAtA[i:], m.Projects[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Projects[iNdEx]))) i-- - dAtA[i] = 0xd2 + dAtA[i] = 0x22 } } - if len(m.Redis) > 0 { - keysForRedis := make([]string, 0, len(m.Redis)) - for k := range m.Redis { - keysForRedis = append(keysForRedis, string(k)) + i -= len(m.DeprecatedRepositorySlug) + copy(dAtA[i:], m.DeprecatedRepositorySlug) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeprecatedRepositorySlug))) + i-- + dAtA[i] = 0x1a + i -= len(m.DeprecatedProjectKey) + copy(dAtA[i:], m.DeprecatedProjectKey) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeprecatedProjectKey))) + i-- + dAtA[i] = 0x12 + if m.Webhook != nil { + { + size, err := m.Webhook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForRedis) - for iNdEx := len(keysForRedis) - 1; iNdEx >= 0; iNdEx-- { - v := m.Redis[string(keysForRedis[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *BitbucketServerRepository) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BitbucketServerRepository) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BitbucketServerRepository) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.RepositorySlug) + copy(dAtA[i:], m.RepositorySlug) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RepositorySlug))) + i-- + dAtA[i] = 0x12 + i -= len(m.ProjectKey) + copy(dAtA[i:], m.ProjectKey) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProjectKey))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CalendarEventSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CalendarEventSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CalendarEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Filter != nil { + { + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) + if err != 
nil { + return 0, err } - i-- - dAtA[i] = 0x12 - i -= len(keysForRedis[iNdEx]) - copy(dAtA[i:], keysForRedis[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForRedis[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xca + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x42 } - if len(m.Emitter) > 0 { - keysForEmitter := make([]string, 0, len(m.Emitter)) - for k := range m.Emitter { - keysForEmitter = append(keysForEmitter, string(k)) + if m.Persistence != nil { + { + size, err := m.Persistence.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForEmitter) - for iNdEx := len(keysForEmitter) - 1; iNdEx >= 0; iNdEx-- { - v := m.Emitter[string(keysForEmitter[iNdEx])] + i-- + dAtA[i] = 0x32 + } + if len(m.Metadata) > 0 { + keysForMetadata := make([]string, 0, len(m.Metadata)) + for k := range m.Metadata { + keysForMetadata = append(keysForMetadata, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + for iNdEx := len(keysForMetadata) - 1; iNdEx >= 0; iNdEx-- { + v := m.Metadata[string(keysForMetadata[iNdEx])] baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) i-- dAtA[i] = 0x12 - i -= len(keysForEmitter[iNdEx]) - copy(dAtA[i:], keysForEmitter[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForEmitter[iNdEx]))) + i -= len(keysForMetadata[iNdEx]) + copy(dAtA[i:], keysForMetadata[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMetadata[iNdEx]))) i-- dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xc2 + dAtA[i] = 0x2a } } - if len(m.Stripe) > 0 { - keysForStripe := make([]string, 0, len(m.Stripe)) - for k := range m.Stripe { - keysForStripe = append(keysForStripe, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForStripe) - for iNdEx := len(keysForStripe) - 1; iNdEx >= 0; iNdEx-- { - v := m.Stripe[string(keysForStripe[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForStripe[iNdEx]) - copy(dAtA[i:], keysForStripe[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForStripe[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1 + i -= len(m.Timezone) + copy(dAtA[i:], m.Timezone) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Timezone))) + i-- + dAtA[i] = 0x22 + if len(m.ExclusionDates) > 0 { + for iNdEx := len(m.ExclusionDates) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ExclusionDates[iNdEx]) + copy(dAtA[i:], m.ExclusionDates[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExclusionDates[iNdEx]))) i-- - dAtA[i] = 0xba + dAtA[i] = 0x1a } } - if len(m.AzureEventsHub) > 0 { - keysForAzureEventsHub := make([]string, 0, len(m.AzureEventsHub)) - for k := range m.AzureEventsHub { - keysForAzureEventsHub = append(keysForAzureEventsHub, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAzureEventsHub) - for iNdEx := len(keysForAzureEventsHub) - 1; iNdEx >= 0; iNdEx-- { - v := 
m.AzureEventsHub[string(keysForAzureEventsHub[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i -= len(m.Interval) + copy(dAtA[i:], m.Interval) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Interval))) + i-- + dAtA[i] = 0x12 + i -= len(m.Schedule) + copy(dAtA[i:], m.Schedule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schedule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CatchupConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CatchupConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CatchupConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.MaxDuration) + copy(dAtA[i:], m.MaxDuration) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MaxDuration))) + i-- + dAtA[i] = 0x12 + i-- + if m.Enabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *ConfigMapPersistence) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigMapPersistence) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMapPersistence) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.CreateIfNotExist { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EmitterEventSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EmitterEventSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EmitterEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Filter != nil { + { + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 - i -= len(keysForAzureEventsHub[iNdEx]) - copy(dAtA[i:], keysForAzureEventsHub[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAzureEventsHub[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb2 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x52 } - if len(m.StorageGrid) > 0 { - keysForStorageGrid := make([]string, 0, len(m.StorageGrid)) - for k := range m.StorageGrid { - keysForStorageGrid = append(keysForStorageGrid, string(k)) + if len(m.Metadata) > 0 { + keysForMetadata := make([]string, 0, len(m.Metadata)) + for k := range m.Metadata { + keysForMetadata = append(keysForMetadata, string(k)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForStorageGrid) - for iNdEx := len(keysForStorageGrid) - 1; iNdEx >= 0; iNdEx-- { - v := 
m.StorageGrid[string(keysForStorageGrid[iNdEx])] + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + for iNdEx := len(keysForMetadata) - 1; iNdEx >= 0; iNdEx-- { + v := m.Metadata[string(keysForMetadata[iNdEx])] baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) i-- dAtA[i] = 0x12 - i -= len(keysForStorageGrid[iNdEx]) - copy(dAtA[i:], keysForStorageGrid[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForStorageGrid[iNdEx]))) + i -= len(keysForMetadata[iNdEx]) + copy(dAtA[i:], keysForMetadata[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMetadata[iNdEx]))) i-- dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xaa + dAtA[i] = 0x4a } } - if len(m.Slack) > 0 { - keysForSlack := make([]string, 0, len(m.Slack)) - for k := range m.Slack { - keysForSlack = append(keysForSlack, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForSlack) - for iNdEx := len(keysForSlack) - 1; iNdEx >= 0; iNdEx-- { - v := m.Slack[string(keysForSlack[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.TLS != nil { + { + size, err := m.TLS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 - i -= len(keysForSlack[iNdEx]) - copy(dAtA[i:], keysForSlack[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSlack[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa2 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x42 } - if len(m.HDFS) > 0 { - keysForHDFS := make([]string, 0, len(m.HDFS)) - for k := range m.HDFS { - keysForHDFS = append(keysForHDFS, string(k)) + i-- + if m.JSONBody { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + if m.ConnectionBackoff != nil { + { + size, err := m.ConnectionBackoff.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForHDFS) - for iNdEx := len(keysForHDFS) - 1; iNdEx >= 0; iNdEx-- { - v := m.HDFS[string(keysForHDFS[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.Password != nil { + { + size, err := m.Password.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 - i -= len(keysForHDFS[iNdEx]) - copy(dAtA[i:], keysForHDFS[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForHDFS[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x9a + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x2a } - if len(m.Gitlab) > 0 { - keysForGitlab := make([]string, 0, len(m.Gitlab)) - for k := range m.Gitlab { - keysForGitlab = append(keysForGitlab, string(k)) + if m.Username != nil { + { + size, err := m.Username.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - 
github_com_gogo_protobuf_sortkeys.Strings(keysForGitlab) - for iNdEx := len(keysForGitlab) - 1; iNdEx >= 0; iNdEx-- { - v := m.Gitlab[string(keysForGitlab[iNdEx])] - baseI := i + i-- + dAtA[i] = 0x22 + } + i -= len(m.ChannelName) + copy(dAtA[i:], m.ChannelName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ChannelName))) + i-- + dAtA[i] = 0x1a + i -= len(m.ChannelKey) + copy(dAtA[i:], m.ChannelKey) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ChannelKey))) + i-- + dAtA[i] = 0x12 + i -= len(m.Broker) + copy(dAtA[i:], m.Broker) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Broker))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EventPersistence) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventPersistence) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventPersistence) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ConfigMap != nil { + { + size, err := m.ConfigMap.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Catchup != nil { + { + size, err := m.Catchup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EventSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EventSourceFilter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventSourceFilter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventSourceFilter) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Expression) + copy(dAtA[i:], m.Expression) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Expression))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EventSourceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], 
nil +} + +func (m *EventSourceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventSourceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -2936,27 +3896,50 @@ func (m *EventSourceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i-- dAtA[i] = 0x12 - i -= len(keysForGitlab[iNdEx]) - copy(dAtA[i:], keysForGitlab[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForGitlab[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x92 } } - if len(m.Github) > 0 { - keysForGithub := make([]string, 0, len(m.Github)) - for k := range m.Github { - keysForGithub = append(keysForGithub, string(k)) + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - github_com_gogo_protobuf_sortkeys.Strings(keysForGithub) - for iNdEx := len(keysForGithub) - 1; iNdEx >= 0; iNdEx-- { - v := m.Github[string(keysForGithub[iNdEx])] - baseI := i + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EventSourceSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventSourceSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventSourceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Gerrit) > 0 { + keysForGerrit := make([]string, 0, len(m.Gerrit)) + for k := range m.Gerrit { + keysForGerrit = append(keysForGerrit, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForGerrit) + for iNdEx := len(keysForGerrit) - 1; iNdEx >= 0; iNdEx-- { + v := m.Gerrit[string(keysForGerrit[iNdEx])] + baseI := i { size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -2967,26 +3950,26 @@ func (m *EventSourceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i-- dAtA[i] = 0x12 - i -= len(keysForGithub[iNdEx]) - copy(dAtA[i:], keysForGithub[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForGithub[iNdEx]))) + i -= len(keysForGerrit[iNdEx]) + copy(dAtA[i:], keysForGerrit[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForGerrit[iNdEx]))) i-- dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x1 + dAtA[i] = 0x2 i-- - dAtA[i] = 0x8a + dAtA[i] = 0x9a } } - if len(m.PubSub) > 0 { - keysForPubSub := make([]string, 0, len(m.PubSub)) - for k := range m.PubSub { - keysForPubSub = append(keysForPubSub, string(k)) + if len(m.SFTP) > 0 { + keysForSFTP := make([]string, 0, len(m.SFTP)) + for k := range m.SFTP { + keysForSFTP = append(keysForSFTP, string(k)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForPubSub) - for iNdEx := len(keysForPubSub) - 1; iNdEx >= 0; iNdEx-- { - v := m.PubSub[string(keysForPubSub[iNdEx])] + github_com_gogo_protobuf_sortkeys.Strings(keysForSFTP) + for iNdEx := len(keysForSFTP) - 1; iNdEx >= 0; iNdEx-- { + v := m.SFTP[string(keysForSFTP[iNdEx])] baseI := i { size, err := 
(&v).MarshalToSizedBuffer(dAtA[:i]) @@ -2998,26 +3981,26 @@ func (m *EventSourceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i-- dAtA[i] = 0x12 - i -= len(keysForPubSub[iNdEx]) - copy(dAtA[i:], keysForPubSub[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForPubSub[iNdEx]))) + i -= len(keysForSFTP[iNdEx]) + copy(dAtA[i:], keysForSFTP[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSFTP[iNdEx]))) i-- dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x1 + dAtA[i] = 0x2 i-- - dAtA[i] = 0x82 + dAtA[i] = 0x92 } } - if len(m.SQS) > 0 { - keysForSQS := make([]string, 0, len(m.SQS)) - for k := range m.SQS { - keysForSQS = append(keysForSQS, string(k)) + if len(m.AzureQueueStorage) > 0 { + keysForAzureQueueStorage := make([]string, 0, len(m.AzureQueueStorage)) + for k := range m.AzureQueueStorage { + keysForAzureQueueStorage = append(keysForAzureQueueStorage, string(k)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForSQS) - for iNdEx := len(keysForSQS) - 1; iNdEx >= 0; iNdEx-- { - v := m.SQS[string(keysForSQS[iNdEx])] + github_com_gogo_protobuf_sortkeys.Strings(keysForAzureQueueStorage) + for iNdEx := len(keysForAzureQueueStorage) - 1; iNdEx >= 0; iNdEx-- { + v := m.AzureQueueStorage[string(keysForAzureQueueStorage[iNdEx])] baseI := i { size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) @@ -3029,24 +4012,26 @@ func (m *EventSourceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i-- dAtA[i] = 0x12 - i -= len(keysForSQS[iNdEx]) - copy(dAtA[i:], keysForSQS[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSQS[iNdEx]))) + i -= len(keysForAzureQueueStorage[iNdEx]) + copy(dAtA[i:], keysForAzureQueueStorage[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAzureQueueStorage[iNdEx]))) i-- dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x7a + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x8a } } - if len(m.SNS) > 0 { - keysForSNS := make([]string, 0, len(m.SNS)) - for k := range m.SNS { - keysForSNS = append(keysForSNS, string(k)) + if len(m.AzureServiceBus) > 0 { + keysForAzureServiceBus := make([]string, 0, len(m.AzureServiceBus)) + for k := range m.AzureServiceBus { + keysForAzureServiceBus = append(keysForAzureServiceBus, string(k)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForSNS) - for iNdEx := len(keysForSNS) - 1; iNdEx >= 0; iNdEx-- { - v := m.SNS[string(keysForSNS[iNdEx])] + github_com_gogo_protobuf_sortkeys.Strings(keysForAzureServiceBus) + for iNdEx := len(keysForAzureServiceBus) - 1; iNdEx >= 0; iNdEx-- { + v := m.AzureServiceBus[string(keysForAzureServiceBus[iNdEx])] baseI := i { size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) @@ -3058,24 +4043,26 @@ func (m *EventSourceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i-- dAtA[i] = 0x12 - i -= len(keysForSNS[iNdEx]) - copy(dAtA[i:], keysForSNS[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSNS[iNdEx]))) + i -= len(keysForAzureServiceBus[iNdEx]) + copy(dAtA[i:], keysForAzureServiceBus[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAzureServiceBus[iNdEx]))) i-- dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x72 + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x82 } } - if len(m.NATS) > 0 { - keysForNATS := make([]string, 0, len(m.NATS)) - for k := range m.NATS { - keysForNATS = append(keysForNATS, string(k)) + if len(m.RedisStream) > 0 { + keysForRedisStream := make([]string, 0, len(m.RedisStream)) + for k := range m.RedisStream { + 
keysForRedisStream = append(keysForRedisStream, string(k)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForNATS) - for iNdEx := len(keysForNATS) - 1; iNdEx >= 0; iNdEx-- { - v := m.NATS[string(keysForNATS[iNdEx])] + github_com_gogo_protobuf_sortkeys.Strings(keysForRedisStream) + for iNdEx := len(keysForRedisStream) - 1; iNdEx >= 0; iNdEx-- { + v := m.RedisStream[string(keysForRedisStream[iNdEx])] baseI := i { size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) @@ -3087,24 +4074,26 @@ func (m *EventSourceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i-- dAtA[i] = 0x12 - i -= len(keysForNATS[iNdEx]) - copy(dAtA[i:], keysForNATS[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNATS[iNdEx]))) + i -= len(keysForRedisStream[iNdEx]) + copy(dAtA[i:], keysForRedisStream[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForRedisStream[iNdEx]))) i-- dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x6a + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xfa } } - if len(m.MQTT) > 0 { - keysForMQTT := make([]string, 0, len(m.MQTT)) - for k := range m.MQTT { - keysForMQTT = append(keysForMQTT, string(k)) + if len(m.Bitbucket) > 0 { + keysForBitbucket := make([]string, 0, len(m.Bitbucket)) + for k := range m.Bitbucket { + keysForBitbucket = append(keysForBitbucket, string(k)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForMQTT) - for iNdEx := len(keysForMQTT) - 1; iNdEx >= 0; iNdEx-- { - v := m.MQTT[string(keysForMQTT[iNdEx])] + github_com_gogo_protobuf_sortkeys.Strings(keysForBitbucket) + for iNdEx := len(keysForBitbucket) - 1; iNdEx >= 0; iNdEx-- { + v := m.Bitbucket[string(keysForBitbucket[iNdEx])] baseI := i { size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) @@ -3116,24 +4105,26 @@ func (m *EventSourceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i-- dAtA[i] = 0x12 - i -= len(keysForMQTT[iNdEx]) - copy(dAtA[i:], keysForMQTT[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMQTT[iNdEx]))) + i -= len(keysForBitbucket[iNdEx]) + copy(dAtA[i:], keysForBitbucket[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForBitbucket[iNdEx]))) i-- dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x62 + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf2 } } - if len(m.Kafka) > 0 { - keysForKafka := make([]string, 0, len(m.Kafka)) - for k := range m.Kafka { - keysForKafka = append(keysForKafka, string(k)) + if len(m.BitbucketServer) > 0 { + keysForBitbucketServer := make([]string, 0, len(m.BitbucketServer)) + for k := range m.BitbucketServer { + keysForBitbucketServer = append(keysForBitbucketServer, string(k)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForKafka) - for iNdEx := len(keysForKafka) - 1; iNdEx >= 0; iNdEx-- { - v := m.Kafka[string(keysForKafka[iNdEx])] + github_com_gogo_protobuf_sortkeys.Strings(keysForBitbucketServer) + for iNdEx := len(keysForBitbucketServer) - 1; iNdEx >= 0; iNdEx-- { + v := m.BitbucketServer[string(keysForBitbucketServer[iNdEx])] baseI := i { size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) @@ -3145,24 +4136,33 @@ func (m *EventSourceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i-- dAtA[i] = 0x12 - i -= len(keysForKafka[iNdEx]) - copy(dAtA[i:], keysForKafka[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForKafka[iNdEx]))) + i -= len(keysForBitbucketServer[iNdEx]) + copy(dAtA[i:], keysForBitbucketServer[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForBitbucketServer[iNdEx]))) i-- dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, 
uint64(baseI-i)) i-- - dAtA[i] = 0x5a + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xea } } - if len(m.AMQP) > 0 { - keysForAMQP := make([]string, 0, len(m.AMQP)) - for k := range m.AMQP { - keysForAMQP = append(keysForAMQP, string(k)) + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe0 + } + if len(m.Generic) > 0 { + keysForGeneric := make([]string, 0, len(m.Generic)) + for k := range m.Generic { + keysForGeneric = append(keysForGeneric, string(k)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForAMQP) - for iNdEx := len(keysForAMQP) - 1; iNdEx >= 0; iNdEx-- { - v := m.AMQP[string(keysForAMQP[iNdEx])] + github_com_gogo_protobuf_sortkeys.Strings(keysForGeneric) + for iNdEx := len(keysForGeneric) - 1; iNdEx >= 0; iNdEx-- { + v := m.Generic[string(keysForGeneric[iNdEx])] baseI := i { size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) @@ -3174,24 +4174,26 @@ func (m *EventSourceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i-- dAtA[i] = 0x12 - i -= len(keysForAMQP[iNdEx]) - copy(dAtA[i:], keysForAMQP[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAMQP[iNdEx]))) + i -= len(keysForGeneric[iNdEx]) + copy(dAtA[i:], keysForGeneric[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForGeneric[iNdEx]))) i-- dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x52 + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda } } - if len(m.Webhook) > 0 { - keysForWebhook := make([]string, 0, len(m.Webhook)) - for k := range m.Webhook { - keysForWebhook = append(keysForWebhook, string(k)) + if len(m.Pulsar) > 0 { + keysForPulsar := make([]string, 0, len(m.Pulsar)) + for k := range m.Pulsar { + keysForPulsar = append(keysForPulsar, string(k)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForWebhook) - for iNdEx := len(keysForWebhook) - 1; iNdEx >= 0; iNdEx-- { - v := m.Webhook[string(keysForWebhook[iNdEx])] + github_com_gogo_protobuf_sortkeys.Strings(keysForPulsar) + for iNdEx := len(keysForPulsar) - 1; iNdEx >= 0; iNdEx-- { + v := m.Pulsar[string(keysForPulsar[iNdEx])] baseI := i { size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) @@ -3203,24 +4205,26 @@ func (m *EventSourceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i-- dAtA[i] = 0x12 - i -= len(keysForWebhook[iNdEx]) - copy(dAtA[i:], keysForWebhook[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForWebhook[iNdEx]))) + i -= len(keysForPulsar[iNdEx]) + copy(dAtA[i:], keysForPulsar[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForPulsar[iNdEx]))) i-- dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x4a + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 } } - if len(m.Resource) > 0 { - keysForResource := make([]string, 0, len(m.Resource)) - for k := range m.Resource { - keysForResource = append(keysForResource, string(k)) + if len(m.NSQ) > 0 { + keysForNSQ := make([]string, 0, len(m.NSQ)) + for k := range m.NSQ { + keysForNSQ = append(keysForNSQ, string(k)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForResource) - for iNdEx := len(keysForResource) - 1; iNdEx >= 0; iNdEx-- { - v := m.Resource[string(keysForResource[iNdEx])] + github_com_gogo_protobuf_sortkeys.Strings(keysForNSQ) + for iNdEx := len(keysForNSQ) - 1; iNdEx >= 0; iNdEx-- { + v := m.NSQ[string(keysForNSQ[iNdEx])] baseI := i { size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) @@ -3232,24 +4236,26 @@ func (m *EventSourceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i-- dAtA[i] = 0x12 - i -= 
len(keysForResource[iNdEx]) - copy(dAtA[i:], keysForResource[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForResource[iNdEx]))) + i -= len(keysForNSQ[iNdEx]) + copy(dAtA[i:], keysForNSQ[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNSQ[iNdEx]))) i-- dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x42 + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca } } - if len(m.File) > 0 { - keysForFile := make([]string, 0, len(m.File)) - for k := range m.File { - keysForFile = append(keysForFile, string(k)) + if len(m.Redis) > 0 { + keysForRedis := make([]string, 0, len(m.Redis)) + for k := range m.Redis { + keysForRedis = append(keysForRedis, string(k)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForFile) - for iNdEx := len(keysForFile) - 1; iNdEx >= 0; iNdEx-- { - v := m.File[string(keysForFile[iNdEx])] + github_com_gogo_protobuf_sortkeys.Strings(keysForRedis) + for iNdEx := len(keysForRedis) - 1; iNdEx >= 0; iNdEx-- { + v := m.Redis[string(keysForRedis[iNdEx])] baseI := i { size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) @@ -3261,24 +4267,26 @@ func (m *EventSourceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i-- dAtA[i] = 0x12 - i -= len(keysForFile[iNdEx]) - copy(dAtA[i:], keysForFile[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForFile[iNdEx]))) + i -= len(keysForRedis[iNdEx]) + copy(dAtA[i:], keysForRedis[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForRedis[iNdEx]))) i-- dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x3a + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 } } - if len(m.Calendar) > 0 { - keysForCalendar := make([]string, 0, len(m.Calendar)) - for k := range m.Calendar { - keysForCalendar = append(keysForCalendar, string(k)) + if len(m.Emitter) > 0 { + keysForEmitter := make([]string, 0, len(m.Emitter)) + for k := range m.Emitter { + keysForEmitter = append(keysForEmitter, string(k)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForCalendar) - for iNdEx := len(keysForCalendar) - 1; iNdEx >= 0; iNdEx-- { - v := m.Calendar[string(keysForCalendar[iNdEx])] + github_com_gogo_protobuf_sortkeys.Strings(keysForEmitter) + for iNdEx := len(keysForEmitter) - 1; iNdEx >= 0; iNdEx-- { + v := m.Emitter[string(keysForEmitter[iNdEx])] baseI := i { size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) @@ -3290,24 +4298,26 @@ func (m *EventSourceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i-- dAtA[i] = 0x12 - i -= len(keysForCalendar[iNdEx]) - copy(dAtA[i:], keysForCalendar[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCalendar[iNdEx]))) + i -= len(keysForEmitter[iNdEx]) + copy(dAtA[i:], keysForEmitter[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForEmitter[iNdEx]))) i-- dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x32 + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba } } - if len(m.Minio) > 0 { - keysForMinio := make([]string, 0, len(m.Minio)) - for k := range m.Minio { - keysForMinio = append(keysForMinio, string(k)) + if len(m.Stripe) > 0 { + keysForStripe := make([]string, 0, len(m.Stripe)) + for k := range m.Stripe { + keysForStripe = append(keysForStripe, string(k)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForMinio) - for iNdEx := len(keysForMinio) - 1; iNdEx >= 0; iNdEx-- { - v := m.Minio[string(keysForMinio[iNdEx])] + github_com_gogo_protobuf_sortkeys.Strings(keysForStripe) + for iNdEx := len(keysForStripe) - 1; iNdEx >= 0; iNdEx-- { + v := m.Stripe[string(keysForStripe[iNdEx])] baseI 
:= i { size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) @@ -3319,265 +4329,122 @@ func (m *EventSourceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i-- dAtA[i] = 0x12 - i -= len(keysForMinio[iNdEx]) - copy(dAtA[i:], keysForMinio[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMinio[iNdEx]))) + i -= len(keysForStripe[iNdEx]) + copy(dAtA[i:], keysForStripe[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForStripe[iNdEx]))) i-- dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x2a - } - } - if m.DeprecatedReplica != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.DeprecatedReplica)) - i-- - dAtA[i] = 0x20 - } - if m.Service != nil { - { - size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.Template != nil { - { - size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(m.EventBusName) - copy(dAtA[i:], m.EventBusName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EventBusName))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *EventSourceStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *EventSourceStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EventSourceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *FileEventSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err } - return dAtA[:n], nil -} - -func (m *FileEventSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FileEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Metadata) > 0 { - keysForMetadata := make([]string, 0, len(m.Metadata)) - for k := range m.Metadata { - keysForMetadata = append(keysForMetadata, string(k)) + if len(m.AzureEventsHub) > 0 { + keysForAzureEventsHub := make([]string, 0, len(m.AzureEventsHub)) + for k := range m.AzureEventsHub { + keysForAzureEventsHub = append(keysForAzureEventsHub, string(k)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) - for iNdEx := len(keysForMetadata) - 1; iNdEx >= 0; iNdEx-- { - v := m.Metadata[string(keysForMetadata[iNdEx])] + github_com_gogo_protobuf_sortkeys.Strings(keysForAzureEventsHub) + for iNdEx := len(keysForAzureEventsHub) - 1; iNdEx >= 0; iNdEx-- { + v := m.AzureEventsHub[string(keysForAzureEventsHub[iNdEx])] baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0x12 - 
i -= len(keysForMetadata[iNdEx]) - copy(dAtA[i:], keysForMetadata[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMetadata[iNdEx]))) + i -= len(keysForAzureEventsHub[iNdEx]) + copy(dAtA[i:], keysForAzureEventsHub[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAzureEventsHub[iNdEx]))) i-- dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x22 + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa } } - i-- - if m.Polling { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - { - size, err := m.WatchPathConfig.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.StorageGrid) > 0 { + keysForStorageGrid := make([]string, 0, len(m.StorageGrid)) + for k := range m.StorageGrid { + keysForStorageGrid = append(keysForStorageGrid, string(k)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(m.EventType) - copy(dAtA[i:], m.EventType) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EventType))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *GenericEventSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GenericEventSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GenericEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.AuthSecret != nil { - { - size, err := m.AuthSecret.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + github_com_gogo_protobuf_sortkeys.Strings(keysForStorageGrid) + for iNdEx := len(keysForStorageGrid) - 1; iNdEx >= 0; iNdEx-- { + v := m.StorageGrid[string(keysForStorageGrid[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(keysForStorageGrid[iNdEx]) + copy(dAtA[i:], keysForStorageGrid[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForStorageGrid[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 } - i-- - dAtA[i] = 0x32 } - if len(m.Metadata) > 0 { - keysForMetadata := make([]string, 0, len(m.Metadata)) - for k := range m.Metadata { - keysForMetadata = append(keysForMetadata, string(k)) + if len(m.Slack) > 0 { + keysForSlack := make([]string, 0, len(m.Slack)) + for k := range m.Slack { + keysForSlack = append(keysForSlack, string(k)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) - for iNdEx := len(keysForMetadata) - 1; iNdEx >= 0; iNdEx-- { - v := m.Metadata[string(keysForMetadata[iNdEx])] + github_com_gogo_protobuf_sortkeys.Strings(keysForSlack) + for iNdEx := len(keysForSlack) - 1; iNdEx >= 0; iNdEx-- { + v := m.Slack[string(keysForSlack[iNdEx])] baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0x12 - i -= len(keysForMetadata[iNdEx]) - copy(dAtA[i:], keysForMetadata[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMetadata[iNdEx]))) + i -= 
len(keysForSlack[iNdEx]) + copy(dAtA[i:], keysForSlack[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSlack[iNdEx]))) i-- dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x2a + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a } } - i-- - if m.JSONBody { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - i-- - if m.Insecure { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - i -= len(m.Config) - copy(dAtA[i:], m.Config) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Config))) - i-- - dAtA[i] = 0x12 - i -= len(m.URL) - copy(dAtA[i:], m.URL) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *GithubEventSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GithubEventSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GithubEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Repositories) > 0 { - for iNdEx := len(m.Repositories) - 1; iNdEx >= 0; iNdEx-- { + if len(m.HDFS) > 0 { + keysForHDFS := make([]string, 0, len(m.HDFS)) + for k := range m.HDFS { + keysForHDFS = append(keysForHDFS, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHDFS) + for iNdEx := len(keysForHDFS) - 1; iNdEx >= 0; iNdEx-- { + v := m.HDFS[string(keysForHDFS[iNdEx])] + baseI := i { - size, err := m.Repositories[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -3585,276 +4452,190 @@ func (m *GithubEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x7a + dAtA[i] = 0x12 + i -= len(keysForHDFS[iNdEx]) + copy(dAtA[i:], keysForHDFS[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForHDFS[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 } } - if len(m.Metadata) > 0 { - keysForMetadata := make([]string, 0, len(m.Metadata)) - for k := range m.Metadata { - keysForMetadata = append(keysForMetadata, string(k)) + if len(m.Gitlab) > 0 { + keysForGitlab := make([]string, 0, len(m.Gitlab)) + for k := range m.Gitlab { + keysForGitlab = append(keysForGitlab, string(k)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) - for iNdEx := len(keysForMetadata) - 1; iNdEx >= 0; iNdEx-- { - v := m.Metadata[string(keysForMetadata[iNdEx])] + github_com_gogo_protobuf_sortkeys.Strings(keysForGitlab) + for iNdEx := len(keysForGitlab) - 1; iNdEx >= 0; iNdEx-- { + v := m.Gitlab[string(keysForGitlab[iNdEx])] baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0x12 - i -= len(keysForMetadata[iNdEx]) - copy(dAtA[i:], keysForMetadata[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMetadata[iNdEx]))) + i -= len(keysForGitlab[iNdEx]) + copy(dAtA[i:], keysForGitlab[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForGitlab[iNdEx]))) i-- dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- - 
dAtA[i] = 0x72 - } - } - i-- - if m.DeleteHookOnFinish { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x68 - i -= len(m.GithubUploadURL) - copy(dAtA[i:], m.GithubUploadURL) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.GithubUploadURL))) - i-- - dAtA[i] = 0x62 - i -= len(m.GithubBaseURL) - copy(dAtA[i:], m.GithubBaseURL) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.GithubBaseURL))) - i-- - dAtA[i] = 0x5a - i -= len(m.ContentType) - copy(dAtA[i:], m.ContentType) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentType))) - i-- - dAtA[i] = 0x52 - i-- - if m.Active { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x48 - i-- - if m.Insecure { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x40 - if m.WebhookSecret != nil { - { - size, err := m.WebhookSecret.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - if m.APIToken != nil { - { - size, err := m.APIToken.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Events[iNdEx]) - copy(dAtA[i:], m.Events[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Events[iNdEx]))) + dAtA[i] = 0x1 i-- - dAtA[i] = 0x2a - } - } - i -= len(m.DeprecatedRepository) - copy(dAtA[i:], m.DeprecatedRepository) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeprecatedRepository))) - i-- - dAtA[i] = 0x22 - i -= len(m.DeprecatedOwner) - copy(dAtA[i:], m.DeprecatedOwner) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeprecatedOwner))) - i-- - dAtA[i] = 0x1a - if m.Webhook != nil { - { - size, err := m.Webhook.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0x8a } - i-- - dAtA[i] = 0x12 - } - i = encodeVarintGenerated(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *GitlabEventSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err } - return dAtA[:n], nil -} - -func (m *GitlabEventSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GitlabEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Metadata) > 0 { - keysForMetadata := make([]string, 0, len(m.Metadata)) - for k := range m.Metadata { - keysForMetadata = append(keysForMetadata, string(k)) + if len(m.Github) > 0 { + keysForGithub := make([]string, 0, len(m.Github)) + for k := range m.Github { + keysForGithub = append(keysForGithub, string(k)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) - for iNdEx := len(keysForMetadata) - 1; iNdEx >= 0; iNdEx-- { - v := m.Metadata[string(keysForMetadata[iNdEx])] + github_com_gogo_protobuf_sortkeys.Strings(keysForGithub) + for iNdEx := len(keysForGithub) - 1; iNdEx >= 0; iNdEx-- { + v := m.Github[string(keysForGithub[iNdEx])] baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0x12 
- i -= len(keysForMetadata[iNdEx]) - copy(dAtA[i:], keysForMetadata[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMetadata[iNdEx]))) + i -= len(keysForGithub[iNdEx]) + copy(dAtA[i:], keysForGithub[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForGithub[iNdEx]))) i-- dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x4a + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 } } - i-- - if m.DeleteHookOnFinish { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x40 - i -= len(m.GitlabBaseURL) - copy(dAtA[i:], m.GitlabBaseURL) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.GitlabBaseURL))) - i-- - dAtA[i] = 0x32 - i-- - if m.EnableSSLVerification { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - if m.AccessToken != nil { - { - size, err := m.AccessToken.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.PubSub) > 0 { + keysForPubSub := make([]string, 0, len(m.PubSub)) + for k := range m.PubSub { + keysForPubSub = append(keysForPubSub, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPubSub) + for iNdEx := len(keysForPubSub) - 1; iNdEx >= 0; iNdEx-- { + v := m.PubSub[string(keysForPubSub[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(keysForPubSub[iNdEx]) + copy(dAtA[i:], keysForPubSub[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForPubSub[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x7a } - i-- - dAtA[i] = 0x22 } - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Events[iNdEx]) - copy(dAtA[i:], m.Events[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Events[iNdEx]))) + if len(m.SQS) > 0 { + keysForSQS := make([]string, 0, len(m.SQS)) + for k := range m.SQS { + keysForSQS = append(keysForSQS, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSQS) + for iNdEx := len(keysForSQS) - 1; iNdEx >= 0; iNdEx-- { + v := m.SQS[string(keysForSQS[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- - dAtA[i] = 0x1a + dAtA[i] = 0x12 + i -= len(keysForSQS[iNdEx]) + copy(dAtA[i:], keysForSQS[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSQS[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x72 } } - i -= len(m.ProjectID) - copy(dAtA[i:], m.ProjectID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProjectID))) - i-- - dAtA[i] = 0x12 - if m.Webhook != nil { - { - size, err := m.Webhook.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.SNS) > 0 { + keysForSNS := make([]string, 0, len(m.SNS)) + for k := range m.SNS { + keysForSNS = append(keysForSNS, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSNS) + for iNdEx := len(keysForSNS) - 1; iNdEx >= 0; iNdEx-- { + v := m.SNS[string(keysForSNS[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= 
len(keysForSNS[iNdEx]) + copy(dAtA[i:], keysForSNS[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSNS[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x6a } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *HDFSEventSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err } - return dAtA[:n], nil -} - -func (m *HDFSEventSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HDFSEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Metadata) > 0 { - keysForMetadata := make([]string, 0, len(m.Metadata)) - for k := range m.Metadata { - keysForMetadata = append(keysForMetadata, string(k)) + if len(m.NATS) > 0 { + keysForNATS := make([]string, 0, len(m.NATS)) + for k := range m.NATS { + keysForNATS = append(keysForNATS, string(k)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) - for iNdEx := len(keysForMetadata) - 1; iNdEx >= 0; iNdEx-- { - v := m.Metadata[string(keysForMetadata[iNdEx])] + github_com_gogo_protobuf_sortkeys.Strings(keysForNATS) + for iNdEx := len(keysForNATS) - 1; iNdEx >= 0; iNdEx-- { + v := m.NATS[string(keysForNATS[iNdEx])] baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } i-- dAtA[i] = 0x12 - i -= len(keysForMetadata[iNdEx]) - copy(dAtA[i:], keysForMetadata[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMetadata[iNdEx]))) + i -= len(keysForNATS[iNdEx]) + copy(dAtA[i:], keysForNATS[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNATS[iNdEx]))) i-- dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) @@ -3862,95 +4643,271 @@ func (m *HDFSEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x62 } } - i -= len(m.KrbServicePrincipalName) - copy(dAtA[i:], m.KrbServicePrincipalName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.KrbServicePrincipalName))) - i-- - dAtA[i] = 0x5a - if m.KrbConfigConfigMap != nil { - { - size, err := m.KrbConfigConfigMap.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.MQTT) > 0 { + keysForMQTT := make([]string, 0, len(m.MQTT)) + for k := range m.MQTT { + keysForMQTT = append(keysForMQTT, string(k)) } - i-- - dAtA[i] = 0x52 - } - i -= len(m.KrbRealm) - copy(dAtA[i:], m.KrbRealm) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.KrbRealm))) - i-- - dAtA[i] = 0x4a - i -= len(m.KrbUsername) - copy(dAtA[i:], m.KrbUsername) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.KrbUsername))) - i-- - dAtA[i] = 0x42 - if m.KrbKeytabSecret != nil { - { - size, err := m.KrbKeytabSecret.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + github_com_gogo_protobuf_sortkeys.Strings(keysForMQTT) + for iNdEx := len(keysForMQTT) - 1; iNdEx >= 0; iNdEx-- { + v := m.MQTT[string(keysForMQTT[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + dAtA[i] 
= 0x12 + i -= len(keysForMQTT[iNdEx]) + copy(dAtA[i:], keysForMQTT[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMQTT[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x5a } - i-- - dAtA[i] = 0x3a } - if m.KrbCCacheSecret != nil { - { - size, err := m.KrbCCacheSecret.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.Kafka) > 0 { + keysForKafka := make([]string, 0, len(m.Kafka)) + for k := range m.Kafka { + keysForKafka = append(keysForKafka, string(k)) } - i-- - dAtA[i] = 0x32 - } - i -= len(m.HDFSUser) - copy(dAtA[i:], m.HDFSUser) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.HDFSUser))) - i-- - dAtA[i] = 0x2a - if len(m.Addresses) > 0 { - for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Addresses[iNdEx]) - copy(dAtA[i:], m.Addresses[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Addresses[iNdEx]))) + github_com_gogo_protobuf_sortkeys.Strings(keysForKafka) + for iNdEx := len(keysForKafka) - 1; iNdEx >= 0; iNdEx-- { + v := m.Kafka[string(keysForKafka[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForKafka[iNdEx]) + copy(dAtA[i:], keysForKafka[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForKafka[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x52 + } + } + if len(m.AMQP) > 0 { + keysForAMQP := make([]string, 0, len(m.AMQP)) + for k := range m.AMQP { + keysForAMQP = append(keysForAMQP, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAMQP) + for iNdEx := len(keysForAMQP) - 1; iNdEx >= 0; iNdEx-- { + v := m.AMQP[string(keysForAMQP[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForAMQP[iNdEx]) + copy(dAtA[i:], keysForAMQP[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAMQP[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x4a + } + } + if len(m.Webhook) > 0 { + keysForWebhook := make([]string, 0, len(m.Webhook)) + for k := range m.Webhook { + keysForWebhook = append(keysForWebhook, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForWebhook) + for iNdEx := len(keysForWebhook) - 1; iNdEx >= 0; iNdEx-- { + v := m.Webhook[string(keysForWebhook[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForWebhook[iNdEx]) + copy(dAtA[i:], keysForWebhook[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForWebhook[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x42 + } + } + if len(m.Resource) > 0 { + keysForResource := make([]string, 0, len(m.Resource)) + for k := range m.Resource { + keysForResource = append(keysForResource, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForResource) + for iNdEx := len(keysForResource) - 1; iNdEx >= 0; iNdEx-- { + v := m.Resource[string(keysForResource[iNdEx])] + baseI := i + { + size, err := 
(&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForResource[iNdEx]) + copy(dAtA[i:], keysForResource[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForResource[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a + } + } + if len(m.File) > 0 { + keysForFile := make([]string, 0, len(m.File)) + for k := range m.File { + keysForFile = append(keysForFile, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForFile) + for iNdEx := len(keysForFile) - 1; iNdEx >= 0; iNdEx-- { + v := m.File[string(keysForFile[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForFile[iNdEx]) + copy(dAtA[i:], keysForFile[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForFile[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + if len(m.Calendar) > 0 { + keysForCalendar := make([]string, 0, len(m.Calendar)) + for k := range m.Calendar { + keysForCalendar = append(keysForCalendar, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCalendar) + for iNdEx := len(keysForCalendar) - 1; iNdEx >= 0; iNdEx-- { + v := m.Calendar[string(keysForCalendar[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForCalendar[iNdEx]) + copy(dAtA[i:], keysForCalendar[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCalendar[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Minio) > 0 { + keysForMinio := make([]string, 0, len(m.Minio)) + for k := range m.Minio { + keysForMinio = append(keysForMinio, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMinio) + for iNdEx := len(keysForMinio) - 1; iNdEx >= 0; iNdEx-- { + v := m.Minio[string(keysForMinio[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForMinio[iNdEx]) + copy(dAtA[i:], keysForMinio[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMinio[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- dAtA[i] = 0x22 } } - i -= len(m.CheckInterval) - copy(dAtA[i:], m.CheckInterval) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CheckInterval))) - i-- - dAtA[i] = 0x1a - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0x12 - { - size, err := m.WatchPathConfig.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if m.Service != nil { + { + size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if m.Template != nil { + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x12 } + i -= len(m.EventBusName) + copy(dAtA[i:], m.EventBusName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EventBusName))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *KafkaConsumerGroup) Marshal() (dAtA []byte, err error) { +func (m *EventSourceStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3960,38 +4917,30 @@ func (m *KafkaConsumerGroup) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *KafkaConsumerGroup) MarshalTo(dAtA []byte) (int, error) { +func (m *EventSourceStatus) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *KafkaConsumerGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *EventSourceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.RebalanceStrategy) - copy(dAtA[i:], m.RebalanceStrategy) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RebalanceStrategy))) - i-- - dAtA[i] = 0x1a - i-- - if m.Oldest { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x10 - i -= len(m.GroupName) - copy(dAtA[i:], m.GroupName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupName))) - i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *KafkaEventSource) Marshal() (dAtA []byte, err error) { +func (m *FileEventSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4001,39 +4950,19 @@ func (m *KafkaEventSource) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *KafkaEventSource) MarshalTo(dAtA []byte) (int, error) { +func (m *FileEventSource) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *KafkaEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *FileEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.SASL != nil { - { - size, err := m.SASL.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x5a - } - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0x52 - i = encodeVarintGenerated(dAtA, i, uint64(m.LimitEventsPerSecond)) - i-- - dAtA[i] = 0x48 - if m.ConsumerGroup != nil { + if m.Filter != nil { { - size, err := m.ConsumerGroup.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -4041,7 +4970,7 @@ func (m *KafkaEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x42 + dAtA[i] = 0x2a } if len(m.Metadata) > 0 { keysForMetadata := make([]string, 0, len(m.Metadata)) @@ -4064,60 +4993,36 @@ func (m *KafkaEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x3a + dAtA[i] = 0x22 } } i-- - if m.JSONBody { + if m.Polling { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- - dAtA[i] = 0x30 - if m.TLS != nil { - { - size, err := m.TLS.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i 
-= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if m.ConnectionBackoff != nil { - { - size, err := m.ConnectionBackoff.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0x18 + { + size, err := m.WatchPathConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x22 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i -= len(m.Topic) - copy(dAtA[i:], m.Topic) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Topic))) - i-- - dAtA[i] = 0x1a - i -= len(m.Partition) - copy(dAtA[i:], m.Partition) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Partition))) i-- dAtA[i] = 0x12 - i -= len(m.URL) - copy(dAtA[i:], m.URL) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) + i -= len(m.EventType) + copy(dAtA[i:], m.EventType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EventType))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *MQTTEventSource) Marshal() (dAtA []byte, err error) { +func (m *GenericEventSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4127,16 +5032,40 @@ func (m *MQTTEventSource) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *MQTTEventSource) MarshalTo(dAtA []byte) (int, error) { +func (m *GenericEventSource) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *MQTTEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *GenericEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l + if m.Filter != nil { + { + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.AuthSecret != nil { + { + size, err := m.AuthSecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } if len(m.Metadata) > 0 { keysForMetadata := make([]string, 0, len(m.Metadata)) for k := range m.Metadata { @@ -4158,20 +5087,8 @@ func (m *MQTTEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x3a - } - } - if m.TLS != nil { - { - size, err := m.TLS.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0x2a } - i-- - dAtA[i] = 0x32 } i-- if m.JSONBody { @@ -4180,27 +5097,18 @@ func (m *MQTTEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0 } i-- - dAtA[i] = 0x28 - if m.ConnectionBackoff != nil { - { - size, err := m.ConnectionBackoff.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 + dAtA[i] = 0x20 + i-- + if m.Insecure { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i -= len(m.ClientID) - copy(dAtA[i:], m.ClientID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientID))) i-- - dAtA[i] = 0x1a - i -= len(m.Topic) - copy(dAtA[i:], m.Topic) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Topic))) + dAtA[i] = 0x18 + i -= len(m.Config) + copy(dAtA[i:], m.Config) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Config))) i-- dAtA[i] = 
0x12 i -= len(m.URL) @@ -4211,7 +5119,7 @@ func (m *MQTTEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *NATSAuth) Marshal() (dAtA []byte, err error) { +func (m *GerritEventSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4221,19 +5129,19 @@ func (m *NATSAuth) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *NATSAuth) MarshalTo(dAtA []byte) (int, error) { +func (m *GerritEventSource) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *NATSAuth) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *GerritEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Credential != nil { + if m.Filter != nil { { - size, err := m.Credential.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -4241,23 +5149,65 @@ func (m *NATSAuth) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x22 + dAtA[i] = 0x52 } - if m.NKey != nil { - { - size, err := m.NKey.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + if m.SslVerify { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + if len(m.Projects) > 0 { + for iNdEx := len(m.Projects) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Projects[iNdEx]) + copy(dAtA[i:], m.Projects[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Projects[iNdEx]))) + i-- + dAtA[i] = 0x42 } - i-- - dAtA[i] = 0x1a } - if m.Token != nil { + if len(m.Metadata) > 0 { + keysForMetadata := make([]string, 0, len(m.Metadata)) + for k := range m.Metadata { + keysForMetadata = append(keysForMetadata, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + for iNdEx := len(keysForMetadata) - 1; iNdEx >= 0; iNdEx-- { + v := m.Metadata[string(keysForMetadata[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForMetadata[iNdEx]) + copy(dAtA[i:], keysForMetadata[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMetadata[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a + } + } + i-- + if m.DeleteHookOnFinish { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + i -= len(m.GerritBaseURL) + copy(dAtA[i:], m.GerritBaseURL) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GerritBaseURL))) + i-- + dAtA[i] = 0x2a + if m.Auth != nil { { - size, err := m.Token.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Auth.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -4265,11 +5215,25 @@ func (m *NATSAuth) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0x22 } - if m.Basic != nil { + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Events[iNdEx]) + copy(dAtA[i:], m.Events[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Events[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.HookName) + copy(dAtA[i:], m.HookName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.HookName))) + i-- + dAtA[i] = 0x12 + if m.Webhook != nil { { - size, err := 
m.Basic.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Webhook.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -4282,7 +5246,7 @@ func (m *NATSAuth) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *NATSEventsSource) Marshal() (dAtA []byte, err error) { +func (m *GithubAppCreds) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4292,27 +5256,121 @@ func (m *NATSEventsSource) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *NATSEventsSource) MarshalTo(dAtA []byte) (int, error) { +func (m *GithubAppCreds) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *NATSEventsSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *GithubAppCreds) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Auth != nil { - { - size, err := m.Auth.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { + i = encodeVarintGenerated(dAtA, i, uint64(m.InstallationID)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.AppID)) + i-- + dAtA[i] = 0x10 + if m.PrivateKey != nil { + { + size, err := m.PrivateKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { return 0, err } i -= size i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x3a + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GithubEventSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GithubEventSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GithubEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.PayloadEnrichment.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + if m.Filter != nil { + { + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + if m.GithubApp != nil { + { + size, err := m.GithubApp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if len(m.Organizations) > 0 { + for iNdEx := len(m.Organizations) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Organizations[iNdEx]) + copy(dAtA[i:], m.Organizations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Organizations[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + } + if len(m.Repositories) > 0 { + for iNdEx := len(m.Repositories) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Repositories[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } } if len(m.Metadata) > 0 { keysForMetadata := make([]string, 0, len(m.Metadata)) @@ -4335,12 +5393,51 @@ func (m *NATSEventsSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x32 + dAtA[i] = 0x72 } } - if m.TLS 
!= nil { + i-- + if m.DeleteHookOnFinish { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x68 + i -= len(m.GithubUploadURL) + copy(dAtA[i:], m.GithubUploadURL) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GithubUploadURL))) + i-- + dAtA[i] = 0x62 + i -= len(m.GithubBaseURL) + copy(dAtA[i:], m.GithubBaseURL) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GithubBaseURL))) + i-- + dAtA[i] = 0x5a + i -= len(m.ContentType) + copy(dAtA[i:], m.ContentType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentType))) + i-- + dAtA[i] = 0x52 + i-- + if m.Active { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + i-- + if m.Insecure { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + if m.WebhookSecret != nil { { - size, err := m.TLS.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.WebhookSecret.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -4348,19 +5445,42 @@ func (m *NATSEventsSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x2a + dAtA[i] = 0x3a } - i-- - if m.JSONBody { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.APIToken != nil { + { + size, err := m.APIToken.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Events[iNdEx]) + copy(dAtA[i:], m.Events[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Events[iNdEx]))) + i-- + dAtA[i] = 0x2a + } } + i -= len(m.DeprecatedRepository) + copy(dAtA[i:], m.DeprecatedRepository) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeprecatedRepository))) i-- - dAtA[i] = 0x20 - if m.ConnectionBackoff != nil { + dAtA[i] = 0x22 + i -= len(m.DeprecatedOwner) + copy(dAtA[i:], m.DeprecatedOwner) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeprecatedOwner))) + i-- + dAtA[i] = 0x1a + if m.Webhook != nil { { - size, err := m.ConnectionBackoff.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Webhook.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -4368,22 +5488,15 @@ func (m *NATSEventsSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a + dAtA[i] = 0x12 } - i -= len(m.Subject) - copy(dAtA[i:], m.Subject) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subject))) - i-- - dAtA[i] = 0x12 - i -= len(m.URL) - copy(dAtA[i:], m.URL) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) + i = encodeVarintGenerated(dAtA, i, uint64(m.ID)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x8 return len(dAtA) - i, nil } -func (m *NSQEventSource) Marshal() (dAtA []byte, err error) { +func (m *GitlabEventSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4393,16 +5506,58 @@ func (m *NSQEventSource) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *NSQEventSource) MarshalTo(dAtA []byte) (int, error) { +func (m *GitlabEventSource) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *NSQEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *GitlabEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + 
copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- + dAtA[i] = 0x6a + } + } + if m.Filter != nil { + { + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + if m.SecretToken != nil { + { + size, err := m.SecretToken.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + if len(m.Projects) > 0 { + for iNdEx := len(m.Projects) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Projects[iNdEx]) + copy(dAtA[i:], m.Projects[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Projects[iNdEx]))) + i-- + dAtA[i] = 0x52 + } + } if len(m.Metadata) > 0 { keysForMetadata := make([]string, 0, len(m.Metadata)) for k := range m.Metadata { @@ -4424,32 +5579,33 @@ func (m *NSQEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x3a + dAtA[i] = 0x4a } } - if m.TLS != nil { - { - size, err := m.TLS.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 + i-- + if m.DeleteHookOnFinish { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } i-- - if m.JSONBody { + dAtA[i] = 0x40 + i -= len(m.GitlabBaseURL) + copy(dAtA[i:], m.GitlabBaseURL) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GitlabBaseURL))) + i-- + dAtA[i] = 0x32 + i-- + if m.EnableSSLVerification { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- dAtA[i] = 0x28 - if m.ConnectionBackoff != nil { + if m.AccessToken != nil { { - size, err := m.ConnectionBackoff.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.AccessToken.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -4459,129 +5615,23 @@ func (m *NSQEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x22 } - i -= len(m.Channel) - copy(dAtA[i:], m.Channel) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Channel))) - i-- - dAtA[i] = 0x1a - i -= len(m.Topic) - copy(dAtA[i:], m.Topic) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Topic))) - i-- - dAtA[i] = 0x12 - i -= len(m.HostAddress) - copy(dAtA[i:], m.HostAddress) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostAddress))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *OwnedRepositories) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *OwnedRepositories) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *OwnedRepositories) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Names) > 0 { - for iNdEx := len(m.Names) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Names[iNdEx]) - copy(dAtA[i:], m.Names[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Names[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *PubSubEventSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, 
err - } - return dAtA[:n], nil -} - -func (m *PubSubEventSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PubSubEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Metadata) > 0 { - keysForMetadata := make([]string, 0, len(m.Metadata)) - for k := range m.Metadata { - keysForMetadata = append(keysForMetadata, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) - for iNdEx := len(keysForMetadata) - 1; iNdEx >= 0; iNdEx-- { - v := m.Metadata[string(keysForMetadata[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForMetadata[iNdEx]) - copy(dAtA[i:], keysForMetadata[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMetadata[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Events[iNdEx]) + copy(dAtA[i:], m.Events[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Events[iNdEx]))) i-- - dAtA[i] = 0x4a + dAtA[i] = 0x1a } } - i -= len(m.DeprecatedCredentialsFile) - copy(dAtA[i:], m.DeprecatedCredentialsFile) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeprecatedCredentialsFile))) + i -= len(m.DeprecatedProjectID) + copy(dAtA[i:], m.DeprecatedProjectID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeprecatedProjectID))) i-- - dAtA[i] = 0x42 - i-- - if m.JSONBody { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x38 - i-- - if m.DeleteSubscriptionOnFinish { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 - if m.CredentialSecret != nil { + dAtA[i] = 0x12 + if m.Webhook != nil { { - size, err := m.CredentialSecret.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Webhook.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -4589,32 +5639,12 @@ func (m *PubSubEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x2a + dAtA[i] = 0xa } - i -= len(m.SubscriptionID) - copy(dAtA[i:], m.SubscriptionID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubscriptionID))) - i-- - dAtA[i] = 0x22 - i -= len(m.Topic) - copy(dAtA[i:], m.Topic) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Topic))) - i-- - dAtA[i] = 0x1a - i -= len(m.TopicProjectID) - copy(dAtA[i:], m.TopicProjectID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopicProjectID))) - i-- - dAtA[i] = 0x12 - i -= len(m.ProjectID) - copy(dAtA[i:], m.ProjectID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProjectID))) - i-- - dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *PulsarEventSource) Marshal() (dAtA []byte, err error) { +func (m *HDFSEventSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4624,16 +5654,28 @@ func (m *PulsarEventSource) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *PulsarEventSource) MarshalTo(dAtA []byte) (int, error) { +func (m *HDFSEventSource) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *PulsarEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *HDFSEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l + if m.Filter != nil { + { + size, err 
:= m.Filter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } if len(m.Metadata) > 0 { keysForMetadata := make([]string, 0, len(m.Metadata)) for k := range m.Metadata { @@ -4655,20 +5697,17 @@ func (m *PulsarEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x52 + dAtA[i] = 0x62 } } + i -= len(m.KrbServicePrincipalName) + copy(dAtA[i:], m.KrbServicePrincipalName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KrbServicePrincipalName))) i-- - if m.JSONBody { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x48 - if m.ConnectionBackoff != nil { + dAtA[i] = 0x5a + if m.KrbConfigConfigMap != nil { { - size, err := m.ConnectionBackoff.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.KrbConfigConfigMap.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -4676,11 +5715,21 @@ func (m *PulsarEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x42 + dAtA[i] = 0x52 } - if m.TLS != nil { + i -= len(m.KrbRealm) + copy(dAtA[i:], m.KrbRealm) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KrbRealm))) + i-- + dAtA[i] = 0x4a + i -= len(m.KrbUsername) + copy(dAtA[i:], m.KrbUsername) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KrbUsername))) + i-- + dAtA[i] = 0x42 + if m.KrbKeytabSecret != nil { { - size, err := m.TLS.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.KrbKeytabSecret.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -4690,25 +5739,9 @@ func (m *PulsarEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x3a } - i-- - if m.TLSValidateHostname { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 - i-- - if m.TLSAllowInsecureConnection { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - if m.TLSTrustCertsSecret != nil { + if m.KrbCCacheSecret != nil { { - size, err := m.TLSTrustCertsSecret.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.KrbCCacheSecret.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -4716,11 +5749,25 @@ func (m *PulsarEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x22 + dAtA[i] = 0x32 } - i -= len(m.URL) - copy(dAtA[i:], m.URL) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) + i -= len(m.HDFSUser) + copy(dAtA[i:], m.HDFSUser) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.HDFSUser))) + i-- + dAtA[i] = 0x2a + if len(m.Addresses) > 0 { + for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Addresses[iNdEx]) + copy(dAtA[i:], m.Addresses[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Addresses[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.CheckInterval) + copy(dAtA[i:], m.CheckInterval) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CheckInterval))) i-- dAtA[i] = 0x1a i -= len(m.Type) @@ -4728,19 +5775,20 @@ func (m *PulsarEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) i-- dAtA[i] = 0x12 - if len(m.Topics) > 0 { - for iNdEx := len(m.Topics) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Topics[iNdEx]) - copy(dAtA[i:], m.Topics[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Topics[iNdEx]))) - i-- - dAtA[i] = 0xa + { + size, err := 
m.WatchPathConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *RedisEventSource) Marshal() (dAtA []byte, err error) { +func (m *KafkaConsumerGroup) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4750,18 +5798,108 @@ func (m *RedisEventSource) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *RedisEventSource) MarshalTo(dAtA []byte) (int, error) { +func (m *KafkaConsumerGroup) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *RedisEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *KafkaConsumerGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Metadata) > 0 { - keysForMetadata := make([]string, 0, len(m.Metadata)) + i -= len(m.RebalanceStrategy) + copy(dAtA[i:], m.RebalanceStrategy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RebalanceStrategy))) + i-- + dAtA[i] = 0x1a + i-- + if m.Oldest { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.GroupName) + copy(dAtA[i:], m.GroupName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *KafkaEventSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KafkaEventSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KafkaEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Config) + copy(dAtA[i:], m.Config) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Config))) + i-- + dAtA[i] = 0x6a + if m.Filter != nil { + { + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + if m.SASL != nil { + { + size, err := m.SASL.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x52 + i = encodeVarintGenerated(dAtA, i, uint64(m.LimitEventsPerSecond)) + i-- + dAtA[i] = 0x48 + if m.ConsumerGroup != nil { + { + size, err := m.ConsumerGroup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if len(m.Metadata) > 0 { + keysForMetadata := make([]string, 0, len(m.Metadata)) for k := range m.Metadata { keysForMetadata = append(keysForMetadata, string(k)) } @@ -4784,6 +5922,14 @@ func (m *RedisEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x3a } } + i-- + if m.JSONBody { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 if m.TLS != nil { { size, err := m.TLS.MarshalToSizedBuffer(dAtA[:i]) @@ -4794,28 +5940,11 @@ func (m *RedisEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x32 - } - if 
len(m.Channels) > 0 { - for iNdEx := len(m.Channels) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Channels[iNdEx]) - copy(dAtA[i:], m.Channels[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Channels[iNdEx]))) - i-- - dAtA[i] = 0x2a - } + dAtA[i] = 0x2a } - i = encodeVarintGenerated(dAtA, i, uint64(m.DB)) - i-- - dAtA[i] = 0x20 - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0x1a - if m.Password != nil { + if m.ConnectionBackoff != nil { { - size, err := m.Password.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ConnectionBackoff.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -4823,17 +5952,27 @@ func (m *RedisEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0x22 } - i -= len(m.HostAddress) - copy(dAtA[i:], m.HostAddress) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostAddress))) + i -= len(m.Topic) + copy(dAtA[i:], m.Topic) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Topic))) + i-- + dAtA[i] = 0x1a + i -= len(m.Partition) + copy(dAtA[i:], m.Partition) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Partition))) + i-- + dAtA[i] = 0x12 + i -= len(m.URL) + copy(dAtA[i:], m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ResourceEventSource) Marshal() (dAtA []byte, err error) { +func (m *MQTTEventSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4843,16 +5982,40 @@ func (m *ResourceEventSource) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResourceEventSource) MarshalTo(dAtA []byte) (int, error) { +func (m *MQTTEventSource) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResourceEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *MQTTEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l + if m.Auth != nil { + { + size, err := m.Auth.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.Filter != nil { + { + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } if len(m.Metadata) > 0 { keysForMetadata := make([]string, 0, len(m.Metadata)) for k := range m.Metadata { @@ -4874,31 +6037,32 @@ func (m *ResourceEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x2a + dAtA[i] = 0x3a } } - if len(m.EventTypes) > 0 { - for iNdEx := len(m.EventTypes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.EventTypes[iNdEx]) - copy(dAtA[i:], m.EventTypes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EventTypes[iNdEx]))) - i-- - dAtA[i] = 0x22 + if m.TLS != nil { + { + size, err := m.TLS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x32 } - { - size, err := m.GroupVersionResource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + if m.JSONBody { + dAtA[i] = 1 + 
} else { + dAtA[i] = 0 } i-- - dAtA[i] = 0x1a - if m.Filter != nil { + dAtA[i] = 0x28 + if m.ConnectionBackoff != nil { { - size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ConnectionBackoff.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -4906,17 +6070,27 @@ func (m *ResourceEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0x22 } - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i -= len(m.ClientID) + copy(dAtA[i:], m.ClientID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientID))) + i-- + dAtA[i] = 0x1a + i -= len(m.Topic) + copy(dAtA[i:], m.Topic) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Topic))) + i-- + dAtA[i] = 0x12 + i -= len(m.URL) + copy(dAtA[i:], m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *ResourceFilter) Marshal() (dAtA []byte, err error) { +func (m *NATSAuth) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -4926,71 +6100,68 @@ func (m *ResourceFilter) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ResourceFilter) MarshalTo(dAtA []byte) (int, error) { +func (m *NATSAuth) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *ResourceFilter) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *NATSAuth) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i-- - if m.AfterStart { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - { - size, err := m.CreatedBy.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if m.Credential != nil { + { + size, err := m.Credential.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x22 } - i-- - dAtA[i] = 0x22 - if len(m.Fields) > 0 { - for iNdEx := len(m.Fields) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Fields[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.NKey != nil { + { + size, err := m.NKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x1a + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - if len(m.Labels) > 0 { - for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.Token != nil { + { + size, err := m.Token.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 + } + if m.Basic != nil { + { + size, err := m.Basic.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i -= len(m.Prefix) - copy(dAtA[i:], m.Prefix) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Prefix))) - i-- - dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *SNSEventSource) Marshal() (dAtA []byte, err error) { 
+func (m *NATSEventsSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5000,24 +6171,47 @@ func (m *SNSEventSource) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SNSEventSource) MarshalTo(dAtA []byte) (int, error) { +func (m *NATSEventsSource) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SNSEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *NATSEventsSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i-- - if m.ValidateSignature { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.Queue != nil { + i -= len(*m.Queue) + copy(dAtA[i:], *m.Queue) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Queue))) + i-- + dAtA[i] = 0x4a + } + if m.Filter != nil { + { + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if m.Auth != nil { + { + size, err := m.Auth.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a } - i-- - dAtA[i] = 0x40 if len(m.Metadata) > 0 { keysForMetadata := make([]string, 0, len(m.Metadata)) for k := range m.Metadata { @@ -5039,22 +6233,12 @@ func (m *SNSEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x3a + dAtA[i] = 0x32 } } - i -= len(m.RoleARN) - copy(dAtA[i:], m.RoleARN) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RoleARN))) - i-- - dAtA[i] = 0x32 - i -= len(m.Region) - copy(dAtA[i:], m.Region) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Region))) - i-- - dAtA[i] = 0x2a - if m.SecretKey != nil { + if m.TLS != nil { { - size, err := m.SecretKey.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.TLS.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5062,11 +6246,19 @@ func (m *SNSEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x22 + dAtA[i] = 0x2a } - if m.AccessKey != nil { + i-- + if m.JSONBody { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + if m.ConnectionBackoff != nil { { - size, err := m.AccessKey.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ConnectionBackoff.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5076,27 +6268,20 @@ func (m *SNSEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x1a } - i -= len(m.TopicArn) - copy(dAtA[i:], m.TopicArn) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopicArn))) + i -= len(m.Subject) + copy(dAtA[i:], m.Subject) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subject))) i-- dAtA[i] = 0x12 - if m.Webhook != nil { - { - size, err := m.Webhook.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } + i -= len(m.URL) + copy(dAtA[i:], m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *SQSEventSource) Marshal() (dAtA []byte, err error) { +func (m *NSQEventSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5106,16 +6291,28 @@ func 
(m *SQSEventSource) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SQSEventSource) MarshalTo(dAtA []byte) (int, error) { +func (m *NSQEventSource) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SQSEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *NSQEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l + if m.Filter != nil { + { + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } if len(m.Metadata) > 0 { keysForMetadata := make([]string, 0, len(m.Metadata)) for k := range m.Metadata { @@ -5137,43 +6334,12 @@ func (m *SQSEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x4a + dAtA[i] = 0x3a } } - i -= len(m.QueueAccountID) - copy(dAtA[i:], m.QueueAccountID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.QueueAccountID))) - i-- - dAtA[i] = 0x42 - i-- - if m.JSONBody { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x38 - i -= len(m.RoleARN) - copy(dAtA[i:], m.RoleARN) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RoleARN))) - i-- - dAtA[i] = 0x32 - i = encodeVarintGenerated(dAtA, i, uint64(m.WaitTimeSeconds)) - i-- - dAtA[i] = 0x28 - i -= len(m.Queue) - copy(dAtA[i:], m.Queue) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Queue))) - i-- - dAtA[i] = 0x22 - i -= len(m.Region) - copy(dAtA[i:], m.Region) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Region))) - i-- - dAtA[i] = 0x1a - if m.SecretKey != nil { + if m.TLS != nil { { - size, err := m.SecretKey.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.TLS.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5181,11 +6347,19 @@ func (m *SQSEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0x32 } - if m.AccessKey != nil { + i-- + if m.JSONBody { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + if m.ConnectionBackoff != nil { { - size, err := m.AccessKey.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.ConnectionBackoff.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5193,12 +6367,27 @@ func (m *SQSEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x22 } + i -= len(m.Channel) + copy(dAtA[i:], m.Channel) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Channel))) + i-- + dAtA[i] = 0x1a + i -= len(m.Topic) + copy(dAtA[i:], m.Topic) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Topic))) + i-- + dAtA[i] = 0x12 + i -= len(m.HostAddress) + copy(dAtA[i:], m.HostAddress) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostAddress))) + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *Selector) Marshal() (dAtA []byte, err error) { +func (m *OwnedRepositories) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5208,35 +6397,34 @@ func (m *Selector) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Selector) MarshalTo(dAtA []byte) (int, error) { +func (m *OwnedRepositories) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Selector) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *OwnedRepositories) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x1a - i -= len(m.Operation) - copy(dAtA[i:], m.Operation) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation))) - i-- - dAtA[i] = 0x12 - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i-- + if len(m.Names) > 0 { + for iNdEx := len(m.Names) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Names[iNdEx]) + copy(dAtA[i:], m.Names[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Names[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Owner))) + i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *Service) Marshal() (dAtA []byte, err error) { +func (m *PayloadEnrichmentFlags) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5246,39 +6434,28 @@ func (m *Service) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Service) MarshalTo(dAtA []byte) (int, error) { +func (m *PayloadEnrichmentFlags) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Service) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *PayloadEnrichmentFlags) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.ClusterIP) - copy(dAtA[i:], m.ClusterIP) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterIP))) i-- - dAtA[i] = 0x12 - if len(m.Ports) > 0 { - for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } + if m.FetchPROnPRCommentAdded { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i-- + dAtA[i] = 0x8 return len(dAtA) - i, nil } -func (m *SlackEventSource) Marshal() (dAtA []byte, err error) { +func (m *PubSubEventSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5288,16 +6465,28 @@ func (m *SlackEventSource) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SlackEventSource) MarshalTo(dAtA []byte) (int, error) { +func (m *PubSubEventSource) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SlackEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *PubSubEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l + if m.Filter != nil { + { + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } if len(m.Metadata) > 0 { keysForMetadata := make([]string, 0, len(m.Metadata)) for k := range m.Metadata { @@ -5319,36 +6508,28 @@ func (m *SlackEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x22 + dAtA[i] = 0x42 } } - if m.Webhook != nil { - { - size, err := m.Webhook.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a + i-- + if m.JSONBody { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if m.Token != nil { - { - size, err := m.Token.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + i-- + dAtA[i] = 0x38 + i-- + if m.DeleteSubscriptionOnFinish { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if m.SigningSecret != nil { + i-- + dAtA[i] = 0x30 + if m.CredentialSecret != nil { { - size, err := m.SigningSecret.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.CredentialSecret.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5356,12 +6537,32 @@ func (m *SlackEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x2a } + i -= len(m.SubscriptionID) + copy(dAtA[i:], m.SubscriptionID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubscriptionID))) + i-- + dAtA[i] = 0x22 + i -= len(m.Topic) + copy(dAtA[i:], m.Topic) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Topic))) + i-- + dAtA[i] = 0x1a + i -= len(m.TopicProjectID) + copy(dAtA[i:], m.TopicProjectID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopicProjectID))) + i-- + dAtA[i] = 0x12 + i -= len(m.ProjectID) + copy(dAtA[i:], m.ProjectID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProjectID))) + i-- + dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *StorageGridEventSource) Marshal() (dAtA []byte, err error) { +func (m *PulsarEventSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5371,72 +6572,52 @@ func (m *StorageGridEventSource) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StorageGridEventSource) MarshalTo(dAtA []byte) (int, error) { +func (m *PulsarEventSource) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *StorageGridEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *PulsarEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Metadata) > 0 { - keysForMetadata := make([]string, 0, len(m.Metadata)) - for k := range m.Metadata { - keysForMetadata = append(keysForMetadata, string(k)) + if m.AuthAthenzSecret != nil { + { + size, err := m.AuthAthenzSecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) - for iNdEx := len(keysForMetadata) - 1; iNdEx >= 0; iNdEx-- { - v := m.Metadata[string(keysForMetadata[iNdEx])] + i-- + dAtA[i] = 0x72 + } + if len(m.AuthAthenzParams) > 0 { + keysForAuthAthenzParams := make([]string, 0, len(m.AuthAthenzParams)) + for k := range m.AuthAthenzParams { + keysForAuthAthenzParams = append(keysForAuthAthenzParams, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAuthAthenzParams) + for iNdEx := len(keysForAuthAthenzParams) - 1; iNdEx >= 0; iNdEx-- { + v := m.AuthAthenzParams[string(keysForAuthAthenzParams[iNdEx])] baseI := i i -= len(v) copy(dAtA[i:], v) i = encodeVarintGenerated(dAtA, i, uint64(len(v))) i-- dAtA[i] = 0x12 - i -= len(keysForMetadata[iNdEx]) - copy(dAtA[i:], keysForMetadata[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMetadata[iNdEx]))) + i -= len(keysForAuthAthenzParams[iNdEx]) + 
copy(dAtA[i:], keysForAuthAthenzParams[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAuthAthenzParams[iNdEx]))) i-- dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x4a - } - } - i -= len(m.APIURL) - copy(dAtA[i:], m.APIURL) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIURL))) - i-- - dAtA[i] = 0x42 - if m.AuthToken != nil { - { - size, err := m.AuthToken.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0x6a } - i-- - dAtA[i] = 0x3a } - i -= len(m.Region) - copy(dAtA[i:], m.Region) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Region))) - i-- - dAtA[i] = 0x32 - i -= len(m.Bucket) - copy(dAtA[i:], m.Bucket) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Bucket))) - i-- - dAtA[i] = 0x2a - i -= len(m.TopicArn) - copy(dAtA[i:], m.TopicArn) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopicArn))) - i-- - dAtA[i] = 0x22 if m.Filter != nil { { size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) @@ -5447,20 +6628,11 @@ func (m *StorageGridEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a - } - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Events[iNdEx]) - copy(dAtA[i:], m.Events[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Events[iNdEx]))) - i-- - dAtA[i] = 0x12 - } + dAtA[i] = 0x62 } - if m.Webhook != nil { + if m.AuthTokenSecret != nil { { - size, err := m.Webhook.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.AuthTokenSecret.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5468,64 +6640,8 @@ func (m *StorageGridEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *StorageGridFilter) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StorageGridFilter) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StorageGridFilter) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Suffix) - copy(dAtA[i:], m.Suffix) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Suffix))) - i-- - dAtA[i] = 0x12 - i -= len(m.Prefix) - copy(dAtA[i:], m.Prefix) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Prefix))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *StripeEventSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + dAtA[i] = 0x5a } - return dAtA[:n], nil -} - -func (m *StripeEventSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StripeEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l if len(m.Metadata) > 0 { keysForMetadata := make([]string, 0, len(m.Metadata)) for k := range m.Metadata { @@ -5547,21 +6663,32 @@ func (m *StripeEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x2a + dAtA[i] = 0x52 } } - if len(m.EventFilter) > 0 { 
- for iNdEx := len(m.EventFilter) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.EventFilter[iNdEx]) - copy(dAtA[i:], m.EventFilter[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EventFilter[iNdEx]))) - i-- - dAtA[i] = 0x22 + i-- + if m.JSONBody { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + if m.ConnectionBackoff != nil { + { + size, err := m.ConnectionBackoff.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x42 } - if m.APIKey != nil { + if m.TLS != nil { { - size, err := m.APIKey.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.TLS.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5569,19 +6696,27 @@ func (m *StripeEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x1a + dAtA[i] = 0x3a } i-- - if m.CreateWebhook { + if m.TLSValidateHostname { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- - dAtA[i] = 0x10 - if m.Webhook != nil { + dAtA[i] = 0x30 + i-- + if m.TLSAllowInsecureConnection { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + if m.TLSTrustCertsSecret != nil { { - size, err := m.Webhook.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.TLSTrustCertsSecret.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5589,12 +6724,31 @@ func (m *StripeEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x22 + } + i -= len(m.URL) + copy(dAtA[i:], m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) + i-- + dAtA[i] = 0x1a + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x12 + if len(m.Topics) > 0 { + for iNdEx := len(m.Topics) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Topics[iNdEx]) + copy(dAtA[i:], m.Topics[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Topics[iNdEx]))) + i-- + dAtA[i] = 0xa + } } return len(dAtA) - i, nil } -func (m *Template) Marshal() (dAtA []byte, err error) { +func (m *RedisEventSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5604,81 +6758,68 @@ func (m *Template) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Template) MarshalTo(dAtA []byte) (int, error) { +func (m *RedisEventSource) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Template) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RedisEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if m.Priority != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Priority)) - i-- - dAtA[i] = 0x58 - } - i -= len(m.PriorityClassName) - copy(dAtA[i:], m.PriorityClassName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PriorityClassName))) + i -= len(m.Username) + copy(dAtA[i:], m.Username) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) i-- dAtA[i] = 0x52 - if len(m.ImagePullSecrets) > 0 { - for iNdEx := len(m.ImagePullSecrets) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ImagePullSecrets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + if m.JSONBody { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + if m.Filter != nil { + { + 
size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x4a + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x42 } - if len(m.NodeSelector) > 0 { - keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) - for k := range m.NodeSelector { - keysForNodeSelector = append(keysForNodeSelector, string(k)) + if len(m.Metadata) > 0 { + keysForMetadata := make([]string, 0, len(m.Metadata)) + for k := range m.Metadata { + keysForMetadata = append(keysForMetadata, string(k)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) - for iNdEx := len(keysForNodeSelector) - 1; iNdEx >= 0; iNdEx-- { - v := m.NodeSelector[string(keysForNodeSelector[iNdEx])] + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + for iNdEx := len(keysForMetadata) - 1; iNdEx >= 0; iNdEx-- { + v := m.Metadata[string(keysForMetadata[iNdEx])] baseI := i i -= len(v) copy(dAtA[i:], v) i = encodeVarintGenerated(dAtA, i, uint64(len(v))) i-- dAtA[i] = 0x12 - i -= len(keysForNodeSelector[iNdEx]) - copy(dAtA[i:], keysForNodeSelector[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNodeSelector[iNdEx]))) + i -= len(keysForMetadata[iNdEx]) + copy(dAtA[i:], keysForMetadata[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMetadata[iNdEx]))) i-- dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x42 - } - } - if len(m.Tolerations) > 0 { - for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x3a } } - if m.Affinity != nil { + if m.TLS != nil { { - size, err := m.Affinity.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.TLS.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5688,52 +6829,26 @@ func (m *Template) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x32 } - if m.SecurityContext != nil { - { - size, err := m.SecurityContext.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if len(m.Volumes) > 0 { - for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } + if len(m.Channels) > 0 { + for iNdEx := len(m.Channels) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Channels[iNdEx]) + copy(dAtA[i:], m.Channels[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Channels[iNdEx]))) i-- - dAtA[i] = 0x22 - } - } - if m.Container != nil { - { - size, err := m.Container.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0x2a } - i-- - dAtA[i] = 0x1a } - i -= len(m.ServiceAccountName) - copy(dAtA[i:], m.ServiceAccountName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName))) + i = encodeVarintGenerated(dAtA, i, uint64(m.DB)) i-- - dAtA[i] = 0x12 - if m.Metadata != nil { + dAtA[i] = 0x20 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x1a + if m.Password != nil { { - size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Password.MarshalToSizedBuffer(dAtA[:i]) if 
err != nil { return 0, err } @@ -5741,50 +6856,17 @@ func (m *Template) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *WatchPathConfig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + dAtA[i] = 0x12 } - return dAtA[:n], nil -} - -func (m *WatchPathConfig) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WatchPathConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.PathRegexp) - copy(dAtA[i:], m.PathRegexp) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathRegexp))) - i-- - dAtA[i] = 0x1a - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0x12 - i -= len(m.Directory) - copy(dAtA[i:], m.Directory) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Directory))) + i -= len(m.HostAddress) + copy(dAtA[i:], m.HostAddress) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostAddress))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *WebhookContext) Marshal() (dAtA []byte, err error) { +func (m *RedisStreamEventSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -5794,29 +6876,24 @@ func (m *WebhookContext) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *WebhookContext) MarshalTo(dAtA []byte) (int, error) { +func (m *RedisStreamEventSource) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *WebhookContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *RedisStreamEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.DeprecatedServerKeyPath) - copy(dAtA[i:], m.DeprecatedServerKeyPath) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeprecatedServerKeyPath))) + i -= len(m.Username) + copy(dAtA[i:], m.Username) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) i-- dAtA[i] = 0x52 - i -= len(m.DeprecatedServerCertPath) - copy(dAtA[i:], m.DeprecatedServerCertPath) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeprecatedServerCertPath))) - i-- - dAtA[i] = 0x4a - if m.AuthSecret != nil { + if m.Filter != nil { { - size, err := m.AuthSecret.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5824,7 +6901,7 @@ func (m *WebhookContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x42 + dAtA[i] = 0x4a } if len(m.Metadata) > 0 { keysForMetadata := make([]string, 0, len(m.Metadata)) @@ -5847,12 +6924,12 @@ func (m *WebhookContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0xa i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) i-- - dAtA[i] = 0x3a + dAtA[i] = 0x42 } } - if m.ServerKeySecret != nil { + if m.TLS != nil { { - size, err := m.ServerKeySecret.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.TLS.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5860,11 +6937,31 @@ func (m *WebhookContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x32 + dAtA[i] = 0x3a } - if 
m.ServerCertSecret != nil { + i -= len(m.ConsumerGroup) + copy(dAtA[i:], m.ConsumerGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConsumerGroup))) + i-- + dAtA[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxMsgCountPerRead)) + i-- + dAtA[i] = 0x28 + if len(m.Streams) > 0 { + for iNdEx := len(m.Streams) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Streams[iNdEx]) + copy(dAtA[i:], m.Streams[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Streams[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.DB)) + i-- + dAtA[i] = 0x18 + if m.Password != nil { { - size, err := m.ServerCertSecret.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Password.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -5872,1344 +6969,1367 @@ func (m *WebhookContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x2a + dAtA[i] = 0x12 } - i -= len(m.URL) - copy(dAtA[i:], m.URL) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) - i-- - dAtA[i] = 0x22 - i -= len(m.Port) - copy(dAtA[i:], m.Port) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Port))) - i-- - dAtA[i] = 0x1a - i -= len(m.Method) - copy(dAtA[i:], m.Method) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Method))) - i-- - dAtA[i] = 0x12 - i -= len(m.Endpoint) - copy(dAtA[i:], m.Endpoint) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Endpoint))) + i -= len(m.HostAddress) + copy(dAtA[i:], m.HostAddress) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostAddress))) i-- dAtA[i] = 0xa return len(dAtA) - i, nil } -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *ResourceEventSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return base + return dAtA[:n], nil } -func (m *AMQPConsumeConfig) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ConsumerTag) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - n += 2 - n += 2 - n += 2 - return n + +func (m *ResourceEventSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *AMQPEventSource) Size() (n int) { - if m == nil { - return 0 - } +func (m *ResourceEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.URL) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ExchangeName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ExchangeType) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.RoutingKey) - n += 1 + l + sovGenerated(uint64(l)) - if m.ConnectionBackoff != nil { - l = m.ConnectionBackoff.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - if m.TLS != nil { - l = m.TLS.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + i -= len(m.Cluster) + copy(dAtA[i:], m.Cluster) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Cluster))) + i-- + dAtA[i] = 0x32 if len(m.Metadata) > 0 { - for k, v := range m.Metadata { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + keysForMetadata := make([]string, 0, len(m.Metadata)) + for k := range m.Metadata { + keysForMetadata = 
append(keysForMetadata, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + for iNdEx := len(keysForMetadata) - 1; iNdEx >= 0; iNdEx-- { + v := m.Metadata[string(keysForMetadata[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForMetadata[iNdEx]) + copy(dAtA[i:], keysForMetadata[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMetadata[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a } } - if m.ExchangeDeclare != nil { - l = m.ExchangeDeclare.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.QueueDeclare != nil { - l = m.QueueDeclare.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.QueueBind != nil { - l = m.QueueBind.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Consume != nil { - l = m.Consume.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.EventTypes) > 0 { + for iNdEx := len(m.EventTypes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.EventTypes[iNdEx]) + copy(dAtA[i:], m.EventTypes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EventTypes[iNdEx]))) + i-- + dAtA[i] = 0x22 + } } - if m.Auth != nil { - l = m.Auth.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.GroupVersionResource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n -} - -func (m *AMQPExchangeDeclareConfig) Size() (n int) { - if m == nil { - return 0 + i-- + dAtA[i] = 0x1a + if m.Filter != nil { + { + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - var l int - _ = l - n += 2 - n += 2 - n += 2 - n += 2 - return n + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *AMQPQueueBindConfig) Size() (n int) { - if m == nil { - return 0 +func (m *ResourceFilter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - var l int - _ = l - n += 2 - return n + return dAtA[:n], nil } -func (m *AMQPQueueDeclareConfig) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - n += 2 - n += 2 - n += 2 - return n +func (m *ResourceFilter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *AzureEventsHubEventSource) Size() (n int) { - if m == nil { - return 0 - } +func (m *ResourceFilter) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.FQDN) - n += 1 + l + sovGenerated(uint64(l)) - if m.SharedAccessKeyName != nil { - l = m.SharedAccessKeyName.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + if m.AfterStart { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if m.SharedAccessKey != nil { - l = m.SharedAccessKey.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x28 + { + size, err := m.CreatedBy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - l = len(m.HubName) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Metadata) > 0 { - for k, v 
:= range m.Metadata { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func (m *CalendarEventSource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Schedule) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Interval) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.ExclusionDates) > 0 { - for _, s := range m.ExclusionDates { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + i-- + dAtA[i] = 0x22 + if len(m.Fields) > 0 { + for iNdEx := len(m.Fields) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Fields[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } } - l = len(m.Timezone) - n += 1 + l + sovGenerated(uint64(l)) - if m.UserPayload != nil { - l = len(m.UserPayload) - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Metadata) > 0 { - for k, v := range m.Metadata { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } } - if m.Persistence != nil { - l = m.Persistence.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n + i -= len(m.Prefix) + copy(dAtA[i:], m.Prefix) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Prefix))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *CatchupConfiguration) Size() (n int) { - if m == nil { - return 0 +func (m *SFTPEventSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - var l int - _ = l - n += 2 - l = len(m.MaxDuration) - n += 1 + l + sovGenerated(uint64(l)) - return n + return dAtA[:n], nil } -func (m *ConfigMapPersistence) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n +func (m *SFTPEventSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *EmitterEventSource) Size() (n int) { - if m == nil { - return 0 - } +func (m *SFTPEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Broker) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ChannelKey) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ChannelName) - n += 1 + l + sovGenerated(uint64(l)) - if m.Username != nil { - l = m.Username.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Password != nil { - l = m.Password.Size() - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.PollIntervalDuration) + copy(dAtA[i:], m.PollIntervalDuration) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PollIntervalDuration))) + i-- + dAtA[i] = 0x4a + if m.Filter != nil { + { + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 } - if m.ConnectionBackoff != nil { - l = m.ConnectionBackoff.Size() - n += 1 + 
l + sovGenerated(uint64(l)) + if len(m.Metadata) > 0 { + keysForMetadata := make([]string, 0, len(m.Metadata)) + for k := range m.Metadata { + keysForMetadata = append(keysForMetadata, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + for iNdEx := len(keysForMetadata) - 1; iNdEx >= 0; iNdEx-- { + v := m.Metadata[string(keysForMetadata[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForMetadata[iNdEx]) + copy(dAtA[i:], keysForMetadata[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMetadata[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a + } } - n += 2 - if m.TLS != nil { - l = m.TLS.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Address != nil { + { + size, err := m.Address.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 } - if len(m.Metadata) > 0 { - for k, v := range m.Metadata { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if m.SSHKeySecret != nil { + { + size, err := m.SSHKeySecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x2a } - return n -} - -func (m *EventPersistence) Size() (n int) { - if m == nil { - return 0 + if m.Password != nil { + { + size, err := m.Password.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 } - var l int - _ = l - if m.Catchup != nil { - l = m.Catchup.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Username != nil { + { + size, err := m.Username.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - if m.ConfigMap != nil { - l = m.ConfigMap.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.WatchPathConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0x12 + i -= len(m.EventType) + copy(dAtA[i:], m.EventType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EventType))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *EventSource) Size() (n int) { - if m == nil { - return 0 +func (m *SNSEventSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + return dAtA[:n], nil } -func (m *EventSourceList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n +func (m *SNSEventSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *EventSourceSpec) 
Size() (n int) { - if m == nil { - return 0 - } +func (m *SNSEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.EventBusName) - n += 1 + l + sovGenerated(uint64(l)) - if m.Template != nil { - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Service != nil { - l = m.Service.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.DeprecatedReplica != nil { - n += 1 + sovGenerated(uint64(*m.DeprecatedReplica)) - } - if len(m.Minio) > 0 { - for k, v := range m.Minio { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + i -= len(m.Endpoint) + copy(dAtA[i:], m.Endpoint) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Endpoint))) + i-- + dAtA[i] = 0x52 + if m.Filter != nil { + { + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x4a } - if len(m.Calendar) > 0 { - for k, v := range m.Calendar { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } + i-- + if m.ValidateSignature { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if len(m.File) > 0 { - for k, v := range m.File { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + i-- + dAtA[i] = 0x40 + if len(m.Metadata) > 0 { + keysForMetadata := make([]string, 0, len(m.Metadata)) + for k := range m.Metadata { + keysForMetadata = append(keysForMetadata, string(k)) } - } - if len(m.Resource) > 0 { - for k, v := range m.Resource { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + for iNdEx := len(keysForMetadata) - 1; iNdEx >= 0; iNdEx-- { + v := m.Metadata[string(keysForMetadata[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForMetadata[iNdEx]) + copy(dAtA[i:], keysForMetadata[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMetadata[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a } } - if len(m.Webhook) > 0 { - for k, v := range m.Webhook { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + i -= len(m.RoleARN) + copy(dAtA[i:], m.RoleARN) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RoleARN))) + i-- + dAtA[i] = 0x32 + i -= len(m.Region) + copy(dAtA[i:], m.Region) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Region))) + i-- + dAtA[i] = 0x2a + if m.SecretKey != nil { + { + size, err := m.SecretKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x22 } - if len(m.AMQP) > 0 { - for k, v := range m.AMQP { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += 
mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if m.AccessKey != nil { + { + size, err := m.AccessKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - if len(m.Kafka) > 0 { - for k, v := range m.Kafka { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + i -= len(m.TopicArn) + copy(dAtA[i:], m.TopicArn) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopicArn))) + i-- + dAtA[i] = 0x12 + if m.Webhook != nil { + { + size, err := m.Webhook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - if len(m.MQTT) > 0 { - for k, v := range m.MQTT { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } + return len(dAtA) - i, nil +} + +func (m *SQSEventSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - if len(m.NATS) > 0 { - for k, v := range m.NATS { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + return dAtA[:n], nil +} + +func (m *SQSEventSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SQSEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SessionToken != nil { + { + size, err := m.SessionToken.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x6a } - if len(m.SNS) > 0 { - for k, v := range m.SNS { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + i -= len(m.Endpoint) + copy(dAtA[i:], m.Endpoint) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Endpoint))) + i-- + dAtA[i] = 0x62 + if m.Filter != nil { + { + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x5a } - if len(m.SQS) > 0 { - for k, v := range m.SQS { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } + i-- + if m.DLQ { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if len(m.PubSub) > 0 { - for k, v := range m.PubSub { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.Github) > 0 { - for k, v := range m.Github { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.Gitlab) > 0 { - for k, v := range m.Gitlab { - _ = k - _ = v - l = v.Size() - mapEntrySize 
:= 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.HDFS) > 0 { - for k, v := range m.HDFS { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.Slack) > 0 { - for k, v := range m.Slack { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.StorageGrid) > 0 { - for k, v := range m.StorageGrid { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.AzureEventsHub) > 0 { - for k, v := range m.AzureEventsHub { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.Stripe) > 0 { - for k, v := range m.Stripe { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.Emitter) > 0 { - for k, v := range m.Emitter { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + i-- + dAtA[i] = 0x50 + if len(m.Metadata) > 0 { + keysForMetadata := make([]string, 0, len(m.Metadata)) + for k := range m.Metadata { + keysForMetadata = append(keysForMetadata, string(k)) } - } - if len(m.Redis) > 0 { - for k, v := range m.Redis { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + for iNdEx := len(keysForMetadata) - 1; iNdEx >= 0; iNdEx-- { + v := m.Metadata[string(keysForMetadata[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForMetadata[iNdEx]) + copy(dAtA[i:], keysForMetadata[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMetadata[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x4a } } - if len(m.NSQ) > 0 { - for k, v := range m.NSQ { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) - } + i -= len(m.QueueAccountID) + copy(dAtA[i:], m.QueueAccountID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.QueueAccountID))) + i-- + dAtA[i] = 0x42 + i-- + if m.JSONBody { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if len(m.Pulsar) > 0 { - for k, v := range m.Pulsar { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + i-- + dAtA[i] = 0x38 + i -= len(m.RoleARN) + copy(dAtA[i:], m.RoleARN) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RoleARN))) + i-- + dAtA[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, 
uint64(m.WaitTimeSeconds)) + i-- + dAtA[i] = 0x28 + i -= len(m.Queue) + copy(dAtA[i:], m.Queue) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Queue))) + i-- + dAtA[i] = 0x22 + i -= len(m.Region) + copy(dAtA[i:], m.Region) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Region))) + i-- + dAtA[i] = 0x1a + if m.SecretKey != nil { + { + size, err := m.SecretKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x12 } - if len(m.Generic) > 0 { - for k, v := range m.Generic { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + if m.AccessKey != nil { + { + size, err := m.AccessKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - if m.Replicas != nil { - n += 2 + sovGenerated(uint64(*m.Replicas)) - } - return n + return len(dAtA) - i, nil } -func (m *EventSourceStatus) Size() (n int) { - if m == nil { - return 0 +func (m *Selector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - var l int - _ = l - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + return dAtA[:n], nil } -func (m *FileEventSource) Size() (n int) { - if m == nil { - return 0 - } +func (m *Selector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Selector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.EventType) - n += 1 + l + sovGenerated(uint64(l)) - l = m.WatchPathConfig.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if len(m.Metadata) > 0 { - for k, v := range m.Metadata { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x1a + i -= len(m.Operation) + copy(dAtA[i:], m.Operation) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *GenericEventSource) Size() (n int) { - if m == nil { - return 0 +func (m *Service) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - var l int - _ = l - l = len(m.URL) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Config) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - n += 2 - if len(m.Metadata) > 0 { - for k, v := range m.Metadata { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if m.AuthSecret != nil { - l = m.AuthSecret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n + return dAtA[:n], nil } -func (m *GithubEventSource) Size() (n int) { - if m == nil { - return 0 - } +func (m *Service) MarshalTo(dAtA []byte) (int, error) { + size := 
m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Service) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - n += 1 + sovGenerated(uint64(m.ID)) - if m.Webhook != nil { - l = m.Webhook.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.DeprecatedOwner) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DeprecatedRepository) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Events) > 0 { - for _, s := range m.Events { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.APIToken != nil { - l = m.APIToken.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.WebhookSecret != nil { - l = m.WebhookSecret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - n += 2 - l = len(m.ContentType) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.GithubBaseURL) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.GithubUploadURL) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if len(m.Metadata) > 0 { - for k, v := range m.Metadata { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.Repositories) > 0 { - for _, e := range m.Repositories { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.ClusterIP) + copy(dAtA[i:], m.ClusterIP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterIP))) + i-- + dAtA[i] = 0x12 + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } } - return n + return len(dAtA) - i, nil } -func (m *GitlabEventSource) Size() (n int) { - if m == nil { - return 0 +func (m *SlackEventSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *SlackEventSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SlackEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Webhook != nil { - l = m.Webhook.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.ProjectID) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Events) > 0 { - for _, s := range m.Events { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if m.Filter != nil { + { + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x2a } - if m.AccessToken != nil { - l = m.AccessToken.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - l = len(m.GitlabBaseURL) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 if len(m.Metadata) > 0 { - for k, v := range m.Metadata { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + keysForMetadata := make([]string, 0, len(m.Metadata)) + for k := range m.Metadata { + keysForMetadata = append(keysForMetadata, string(k)) } - } - return n -} - -func (m *HDFSEventSource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.WatchPathConfig.Size() - n += 1 + l + 
sovGenerated(uint64(l)) - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.CheckInterval) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Addresses) > 0 { - for _, s := range m.Addresses { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + for iNdEx := len(keysForMetadata) - 1; iNdEx >= 0; iNdEx-- { + v := m.Metadata[string(keysForMetadata[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForMetadata[iNdEx]) + copy(dAtA[i:], keysForMetadata[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMetadata[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 } } - l = len(m.HDFSUser) - n += 1 + l + sovGenerated(uint64(l)) - if m.KrbCCacheSecret != nil { - l = m.KrbCCacheSecret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.KrbKeytabSecret != nil { - l = m.KrbKeytabSecret.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Webhook != nil { + { + size, err := m.Webhook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - l = len(m.KrbUsername) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.KrbRealm) - n += 1 + l + sovGenerated(uint64(l)) - if m.KrbConfigConfigMap != nil { - l = m.KrbConfigConfigMap.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Token != nil { + { + size, err := m.Token.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - l = len(m.KrbServicePrincipalName) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Metadata) > 0 { - for k, v := range m.Metadata { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if m.SigningSecret != nil { + { + size, err := m.SigningSecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *KafkaConsumerGroup) Size() (n int) { - if m == nil { - return 0 +func (m *StorageGridEventSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - var l int - _ = l - l = len(m.GroupName) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - l = len(m.RebalanceStrategy) - n += 1 + l + sovGenerated(uint64(l)) - return n + return dAtA[:n], nil } -func (m *KafkaEventSource) Size() (n int) { - if m == nil { - return 0 - } +func (m *StorageGridEventSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageGridEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.URL) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Partition) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Topic) - n += 1 + l + sovGenerated(uint64(l)) - if m.ConnectionBackoff != nil { - l = m.ConnectionBackoff.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.TLS != nil { - l = m.TLS.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 if len(m.Metadata) > 0 { - for k, v := 
range m.Metadata { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + keysForMetadata := make([]string, 0, len(m.Metadata)) + for k := range m.Metadata { + keysForMetadata = append(keysForMetadata, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + for iNdEx := len(keysForMetadata) - 1; iNdEx >= 0; iNdEx-- { + v := m.Metadata[string(keysForMetadata[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForMetadata[iNdEx]) + copy(dAtA[i:], keysForMetadata[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMetadata[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x4a } } - if m.ConsumerGroup != nil { - l = m.ConsumerGroup.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 1 + sovGenerated(uint64(m.LimitEventsPerSecond)) - l = len(m.Version) - n += 1 + l + sovGenerated(uint64(l)) - if m.SASL != nil { - l = m.SASL.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *MQTTEventSource) Size() (n int) { - if m == nil { - return 0 + i -= len(m.APIURL) + copy(dAtA[i:], m.APIURL) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIURL))) + i-- + dAtA[i] = 0x42 + if m.AuthToken != nil { + { + size, err := m.AuthToken.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a } - var l int - _ = l - l = len(m.URL) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Topic) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ClientID) - n += 1 + l + sovGenerated(uint64(l)) - if m.ConnectionBackoff != nil { - l = m.ConnectionBackoff.Size() - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.Region) + copy(dAtA[i:], m.Region) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Region))) + i-- + dAtA[i] = 0x32 + i -= len(m.Bucket) + copy(dAtA[i:], m.Bucket) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Bucket))) + i-- + dAtA[i] = 0x2a + i -= len(m.TopicArn) + copy(dAtA[i:], m.TopicArn) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopicArn))) + i-- + dAtA[i] = 0x22 + if m.Filter != nil { + { + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - n += 2 - if m.TLS != nil { - l = m.TLS.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Events[iNdEx]) + copy(dAtA[i:], m.Events[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Events[iNdEx]))) + i-- + dAtA[i] = 0x12 + } } - if len(m.Metadata) > 0 { - for k, v := range m.Metadata { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if m.Webhook != nil { + { + size, err := m.Webhook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *NATSAuth) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Basic != nil { - l = m.Basic.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Token != 
nil { - l = m.Token.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.NKey != nil { - l = m.NKey.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Credential != nil { - l = m.Credential.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (m *StorageGridFilter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *NATSEventsSource) Size() (n int) { - if m == nil { - return 0 - } +func (m *StorageGridFilter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageGridFilter) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.URL) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Subject) - n += 1 + l + sovGenerated(uint64(l)) - if m.ConnectionBackoff != nil { - l = m.ConnectionBackoff.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - if m.TLS != nil { - l = m.TLS.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Metadata) > 0 { - for k, v := range m.Metadata { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if m.Auth != nil { - l = m.Auth.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n + i -= len(m.Suffix) + copy(dAtA[i:], m.Suffix) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Suffix))) + i-- + dAtA[i] = 0x12 + i -= len(m.Prefix) + copy(dAtA[i:], m.Prefix) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Prefix))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *NSQEventSource) Size() (n int) { - if m == nil { - return 0 +func (m *StripeEventSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *StripeEventSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StripeEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.HostAddress) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Topic) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Channel) - n += 1 + l + sovGenerated(uint64(l)) - if m.ConnectionBackoff != nil { - l = m.ConnectionBackoff.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - if m.TLS != nil { - l = m.TLS.Size() - n += 1 + l + sovGenerated(uint64(l)) - } if len(m.Metadata) > 0 { - for k, v := range m.Metadata { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + keysForMetadata := make([]string, 0, len(m.Metadata)) + for k := range m.Metadata { + keysForMetadata = append(keysForMetadata, string(k)) } - } - return n -} - -func (m *OwnedRepositories) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Names) > 0 { - for _, s := range m.Names { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + for iNdEx := len(keysForMetadata) - 1; iNdEx >= 0; iNdEx-- { + v := m.Metadata[string(keysForMetadata[iNdEx])] + baseI := i + i 
-= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForMetadata[iNdEx]) + copy(dAtA[i:], keysForMetadata[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMetadata[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a } } - return n -} - -func (m *PubSubEventSource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ProjectID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.TopicProjectID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Topic) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.SubscriptionID) - n += 1 + l + sovGenerated(uint64(l)) - if m.CredentialSecret != nil { - l = m.CredentialSecret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - n += 2 - l = len(m.DeprecatedCredentialsFile) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Metadata) > 0 { - for k, v := range m.Metadata { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if len(m.EventFilter) > 0 { + for iNdEx := len(m.EventFilter) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.EventFilter[iNdEx]) + copy(dAtA[i:], m.EventFilter[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EventFilter[iNdEx]))) + i-- + dAtA[i] = 0x22 } } - return n -} - -func (m *PulsarEventSource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Topics) > 0 { - for _, s := range m.Topics { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + if m.APIKey != nil { + { + size, err := m.APIKey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1a } - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.URL) - n += 1 + l + sovGenerated(uint64(l)) - if m.TLSTrustCertsSecret != nil { - l = m.TLSTrustCertsSecret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - n += 2 - if m.TLS != nil { - l = m.TLS.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ConnectionBackoff != nil { - l = m.ConnectionBackoff.Size() - n += 1 + l + sovGenerated(uint64(l)) + i-- + if m.CreateWebhook { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - n += 2 - if len(m.Metadata) > 0 { - for k, v := range m.Metadata { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + i-- + dAtA[i] = 0x10 + if m.Webhook != nil { + { + size, err := m.Webhook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - return n + return len(dAtA) - i, nil } -func (m *RedisEventSource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.HostAddress) - n += 1 + l + sovGenerated(uint64(l)) - if m.Password != nil { - l = m.Password.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.DB)) - if len(m.Channels) > 0 { - for _, s := range m.Channels { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.TLS != nil { - l = m.TLS.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Metadata) > 0 { - for k, v := range m.Metadata { - _ = k - _ = v - mapEntrySize := 1 + len(k) + 
sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } +func (m *Template) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ResourceEventSource) Size() (n int) { - if m == nil { - return 0 - } +func (m *Template) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Template) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - if m.Filter != nil { - l = m.Filter.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Priority != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Priority)) + i-- + dAtA[i] = 0x58 } - l = m.GroupVersionResource.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.EventTypes) > 0 { - for _, s := range m.EventTypes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) + i -= len(m.PriorityClassName) + copy(dAtA[i:], m.PriorityClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PriorityClassName))) + i-- + dAtA[i] = 0x52 + if len(m.ImagePullSecrets) > 0 { + for iNdEx := len(m.ImagePullSecrets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ImagePullSecrets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a } } - if len(m.Metadata) > 0 { - for k, v := range m.Metadata { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + if len(m.NodeSelector) > 0 { + keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) + for k := range m.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, string(k)) } - } - return n -} - -func (m *ResourceFilter) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Prefix) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Labels) > 0 { - for _, e := range m.Labels { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + for iNdEx := len(keysForNodeSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.NodeSelector[string(keysForNodeSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForNodeSelector[iNdEx]) + copy(dAtA[i:], keysForNodeSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNodeSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x42 } } - if len(m.Fields) > 0 { - for _, e := range m.Fields { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Tolerations) > 0 { + for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a } } - l = m.CreatedBy.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n -} - -func (m *SNSEventSource) Size() (n int) { - if m == nil { - return 0 + if m.Affinity != nil { + { + size, err := m.Affinity.MarshalToSizedBuffer(dAtA[:i]) + if err != 
nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 } - var l int - _ = l - if m.Webhook != nil { - l = m.Webhook.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.SecurityContext != nil { + { + size, err := m.SecurityContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a } - l = len(m.TopicArn) - n += 1 + l + sovGenerated(uint64(l)) - if m.AccessKey != nil { - l = m.AccessKey.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } } - if m.SecretKey != nil { - l = m.SecretKey.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Container != nil { + { + size, err := m.Container.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a } - l = len(m.Region) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.RoleARN) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Metadata) > 0 { - for k, v := range m.Metadata { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + i -= len(m.ServiceAccountName) + copy(dAtA[i:], m.ServiceAccountName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName))) + i-- + dAtA[i] = 0x12 + if m.Metadata != nil { + { + size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0xa } - n += 2 - return n + return len(dAtA) - i, nil } -func (m *SQSEventSource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.AccessKey != nil { - l = m.AccessKey.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.SecretKey != nil { - l = m.SecretKey.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.Region) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Queue) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.WaitTimeSeconds)) - l = len(m.RoleARN) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - l = len(m.QueueAccountID) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Metadata) > 0 { - for k, v := range m.Metadata { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } +func (m *WatchPathConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *Selector) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Operation) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - return n +func (m *WatchPathConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Service) Size() (n int) { - if m == nil { - return 0 - } +func (m *WatchPathConfig) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.ClusterIP) - n += 1 + l + sovGenerated(uint64(l)) - return n + i -= len(m.PathRegexp) + copy(dAtA[i:], m.PathRegexp) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathRegexp))) + i-- + dAtA[i] = 0x1a + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + i -= len(m.Directory) + copy(dAtA[i:], m.Directory) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Directory))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *SlackEventSource) Size() (n int) { - if m == nil { - return 0 +func (m *WebhookContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *WebhookContext) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebhookContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.SigningSecret != nil { - l = m.SigningSecret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Token != nil { - l = m.Token.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Webhook != nil { - l = m.Webhook.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.MaxPayloadSize != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MaxPayloadSize)) + i-- + dAtA[i] = 0x48 + } + if m.AuthSecret != nil { + { + size, err := m.AuthSecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 } if len(m.Metadata) > 0 { - for k, v := range m.Metadata { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + keysForMetadata := make([]string, 0, len(m.Metadata)) + for k := range m.Metadata { + keysForMetadata = append(keysForMetadata, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + for iNdEx := len(keysForMetadata) - 1; iNdEx >= 0; iNdEx-- { + v := m.Metadata[string(keysForMetadata[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForMetadata[iNdEx]) + copy(dAtA[i:], keysForMetadata[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMetadata[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a } } - return n + if m.ServerKeySecret != nil { + { + size, err := m.ServerKeySecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.ServerCertSecret != nil { + { + size, err := m.ServerCertSecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + i -= len(m.URL) + copy(dAtA[i:], m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) + i-- + dAtA[i] = 0x22 + i -= len(m.Port) + copy(dAtA[i:], m.Port) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Port))) + i-- + dAtA[i] = 0x1a + i -= 
len(m.Method) + copy(dAtA[i:], m.Method) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Method))) + i-- + dAtA[i] = 0x12 + i -= len(m.Endpoint) + copy(dAtA[i:], m.Endpoint) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Endpoint))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *StorageGridEventSource) Size() (n int) { - if m == nil { - return 0 +func (m *WebhookEventSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *WebhookEventSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebhookEventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if m.Webhook != nil { - l = m.Webhook.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Events) > 0 { - for _, s := range m.Events { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } if m.Filter != nil { - l = m.Filter.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.TopicArn) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Bucket) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Region) - n += 1 + l + sovGenerated(uint64(l)) - if m.AuthToken != nil { - l = m.AuthToken.Size() - n += 1 + l + sovGenerated(uint64(l)) + { + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 } - l = len(m.APIURL) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Metadata) > 0 { - for k, v := range m.Metadata { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + { + size, err := m.WebhookContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - return n + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil } -func (m *StorageGridFilter) Size() (n int) { +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AMQPConsumeConfig) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Prefix) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Suffix) + l = len(m.ConsumerTag) n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + n += 2 + n += 2 return n } -func (m *StripeEventSource) Size() (n int) { +func (m *AMQPEventSource) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.Webhook != nil { - l = m.Webhook.Size() + l = len(m.URL) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ExchangeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ExchangeType) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RoutingKey) + n += 1 + l + sovGenerated(uint64(l)) + if m.ConnectionBackoff != nil { + l = m.ConnectionBackoff.Size() n += 1 + l + sovGenerated(uint64(l)) } n += 2 - if m.APIKey != nil { - l = m.APIKey.Size() + if m.TLS != nil { + l = m.TLS.Size() n += 1 + l + sovGenerated(uint64(l)) } - if len(m.EventFilter) > 0 { - for _, s := range m.EventFilter { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } if len(m.Metadata) > 0 { for k, v := range m.Metadata { _ = k @@ 
-7218,104 +8338,95 @@ func (m *StripeEventSource) Size() (n int) { n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } - return n -} - -func (m *Template) Size() (n int) { - if m == nil { - return 0 + if m.ExchangeDeclare != nil { + l = m.ExchangeDeclare.Size() + n += 1 + l + sovGenerated(uint64(l)) } - var l int - _ = l - if m.Metadata != nil { - l = m.Metadata.Size() + if m.QueueDeclare != nil { + l = m.QueueDeclare.Size() n += 1 + l + sovGenerated(uint64(l)) } - l = len(m.ServiceAccountName) - n += 1 + l + sovGenerated(uint64(l)) - if m.Container != nil { - l = m.Container.Size() + if m.QueueBind != nil { + l = m.QueueBind.Size() n += 1 + l + sovGenerated(uint64(l)) } - if len(m.Volumes) > 0 { - for _, e := range m.Volumes { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + if m.Consume != nil { + l = m.Consume.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if m.SecurityContext != nil { - l = m.SecurityContext.Size() + if m.Auth != nil { + l = m.Auth.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.Affinity != nil { - l = m.Affinity.Size() + if m.URLSecret != nil { + l = m.URLSecret.Size() n += 1 + l + sovGenerated(uint64(l)) } - if len(m.Tolerations) > 0 { - for _, e := range m.Tolerations { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + if m.Filter != nil { + l = m.Filter.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if len(m.NodeSelector) > 0 { - for k, v := range m.NodeSelector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } + return n +} + +func (m *AMQPExchangeDeclareConfig) Size() (n int) { + if m == nil { + return 0 } - if len(m.ImagePullSecrets) > 0 { - for _, e := range m.ImagePullSecrets { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.PriorityClassName) - n += 1 + l + sovGenerated(uint64(l)) - if m.Priority != nil { - n += 1 + sovGenerated(uint64(*m.Priority)) + var l int + _ = l + n += 2 + n += 2 + n += 2 + n += 2 + return n +} + +func (m *AMQPQueueBindConfig) Size() (n int) { + if m == nil { + return 0 } + var l int + _ = l + n += 2 return n } -func (m *WatchPathConfig) Size() (n int) { +func (m *AMQPQueueDeclareConfig) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Directory) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Path) + l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.PathRegexp) + n += 2 + n += 2 + n += 2 + n += 2 + l = len(m.Arguments) n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *WebhookContext) Size() (n int) { +func (m *AzureEventsHubEventSource) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Endpoint) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Method) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Port) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.URL) + l = len(m.FQDN) n += 1 + l + sovGenerated(uint64(l)) - if m.ServerCertSecret != nil { - l = m.ServerCertSecret.Size() + if m.SharedAccessKeyName != nil { + l = m.SharedAccessKeyName.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.ServerKeySecret != nil { - l = m.ServerKeySecret.Size() + if m.SharedAccessKey != nil { + l = m.SharedAccessKey.Size() n += 1 + l + sovGenerated(uint64(l)) } + l = len(m.HubName) + n += 1 + l + sovGenerated(uint64(l)) if len(m.Metadata) > 0 { for k, v := range m.Metadata { _ = k @@ -7324,1217 +8435,8666 @@ func (m *WebhookContext) Size() (n int) { n += mapEntrySize + 1 + 
sovGenerated(uint64(mapEntrySize)) } } - if m.AuthSecret != nil { - l = m.AuthSecret.Size() + if m.Filter != nil { + l = m.Filter.Size() n += 1 + l + sovGenerated(uint64(l)) } - l = len(m.DeprecatedServerCertPath) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DeprecatedServerKeyPath) - n += 1 + l + sovGenerated(uint64(l)) return n } -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *AMQPConsumeConfig) String() string { - if this == nil { - return "nil" +func (m *AzureQueueStorageEventSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&AMQPConsumeConfig{`, - `ConsumerTag:` + fmt.Sprintf("%v", this.ConsumerTag) + `,`, - `AutoAck:` + fmt.Sprintf("%v", this.AutoAck) + `,`, - `Exclusive:` + fmt.Sprintf("%v", this.Exclusive) + `,`, - `NoLocal:` + fmt.Sprintf("%v", this.NoLocal) + `,`, - `NoWait:` + fmt.Sprintf("%v", this.NoWait) + `,`, - `}`, - }, "") - return s -} -func (this *AMQPEventSource) String() string { - if this == nil { - return "nil" + var l int + _ = l + l = len(m.StorageAccountName) + n += 1 + l + sovGenerated(uint64(l)) + if m.ConnectionString != nil { + l = m.ConnectionString.Size() + n += 1 + l + sovGenerated(uint64(l)) } - keysForMetadata := make([]string, 0, len(this.Metadata)) - for k := range this.Metadata { - keysForMetadata = append(keysForMetadata, k) + l = len(m.QueueName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + if len(m.Metadata) > 0 { + for k, v := range m.Metadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) - mapStringForMetadata := "map[string]string{" - for _, k := range keysForMetadata { - mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + if m.Filter != nil { + l = m.Filter.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForMetadata += "}" - s := strings.Join([]string{`&AMQPEventSource{`, - `URL:` + fmt.Sprintf("%v", this.URL) + `,`, - `ExchangeName:` + fmt.Sprintf("%v", this.ExchangeName) + `,`, - `ExchangeType:` + fmt.Sprintf("%v", this.ExchangeType) + `,`, - `RoutingKey:` + fmt.Sprintf("%v", this.RoutingKey) + `,`, - `ConnectionBackoff:` + strings.Replace(fmt.Sprintf("%v", this.ConnectionBackoff), "Backoff", "common.Backoff", 1) + `,`, - `JSONBody:` + fmt.Sprintf("%v", this.JSONBody) + `,`, - `TLS:` + strings.Replace(fmt.Sprintf("%v", this.TLS), "TLSConfig", "common.TLSConfig", 1) + `,`, - `Metadata:` + mapStringForMetadata + `,`, - `ExchangeDeclare:` + strings.Replace(this.ExchangeDeclare.String(), "AMQPExchangeDeclareConfig", "AMQPExchangeDeclareConfig", 1) + `,`, - `QueueDeclare:` + strings.Replace(this.QueueDeclare.String(), "AMQPQueueDeclareConfig", "AMQPQueueDeclareConfig", 1) + `,`, - `QueueBind:` + strings.Replace(this.QueueBind.String(), "AMQPQueueBindConfig", "AMQPQueueBindConfig", 1) + `,`, - `Consume:` + strings.Replace(this.Consume.String(), "AMQPConsumeConfig", "AMQPConsumeConfig", 1) + `,`, - `Auth:` + strings.Replace(fmt.Sprintf("%v", this.Auth), "BasicAuth", "common.BasicAuth", 1) + `,`, - `}`, - }, "") - return s -} -func (this *AMQPExchangeDeclareConfig) String() string { - if this == nil { - return "nil" + n += 2 + if m.WaitTimeInSeconds != nil { + n += 1 + sovGenerated(uint64(*m.WaitTimeInSeconds)) } - 
s := strings.Join([]string{`&AMQPExchangeDeclareConfig{`, - `Durable:` + fmt.Sprintf("%v", this.Durable) + `,`, - `AutoDelete:` + fmt.Sprintf("%v", this.AutoDelete) + `,`, - `Internal:` + fmt.Sprintf("%v", this.Internal) + `,`, - `NoWait:` + fmt.Sprintf("%v", this.NoWait) + `,`, - `}`, - }, "") - return s + return n } -func (this *AMQPQueueBindConfig) String() string { - if this == nil { - return "nil" + +func (m *AzureServiceBusEventSource) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&AMQPQueueBindConfig{`, - `NoWait:` + fmt.Sprintf("%v", this.NoWait) + `,`, - `}`, - }, "") - return s -} -func (this *AMQPQueueDeclareConfig) String() string { - if this == nil { - return "nil" + var l int + _ = l + if m.ConnectionString != nil { + l = m.ConnectionString.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&AMQPQueueDeclareConfig{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Durable:` + fmt.Sprintf("%v", this.Durable) + `,`, - `AutoDelete:` + fmt.Sprintf("%v", this.AutoDelete) + `,`, - `Exclusive:` + fmt.Sprintf("%v", this.Exclusive) + `,`, - `NoWait:` + fmt.Sprintf("%v", this.NoWait) + `,`, - `}`, - }, "") - return s -} -func (this *AzureEventsHubEventSource) String() string { - if this == nil { - return "nil" + l = len(m.QueueName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.TopicName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SubscriptionName) + n += 1 + l + sovGenerated(uint64(l)) + if m.TLS != nil { + l = m.TLS.Size() + n += 1 + l + sovGenerated(uint64(l)) } - keysForMetadata := make([]string, 0, len(this.Metadata)) - for k := range this.Metadata { - keysForMetadata = append(keysForMetadata, k) + n += 2 + if len(m.Metadata) > 0 { + for k, v := range m.Metadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) - mapStringForMetadata := "map[string]string{" - for _, k := range keysForMetadata { - mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + if m.Filter != nil { + l = m.Filter.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForMetadata += "}" - s := strings.Join([]string{`&AzureEventsHubEventSource{`, - `FQDN:` + fmt.Sprintf("%v", this.FQDN) + `,`, - `SharedAccessKeyName:` + strings.Replace(fmt.Sprintf("%v", this.SharedAccessKeyName), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `SharedAccessKey:` + strings.Replace(fmt.Sprintf("%v", this.SharedAccessKey), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `HubName:` + fmt.Sprintf("%v", this.HubName) + `,`, - `Metadata:` + mapStringForMetadata + `,`, - `}`, - }, "") - return s + l = len(m.FullyQualifiedNamespace) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *CalendarEventSource) String() string { - if this == nil { - return "nil" + +func (m *BitbucketAuth) Size() (n int) { + if m == nil { + return 0 } - keysForMetadata := make([]string, 0, len(this.Metadata)) - for k := range this.Metadata { - keysForMetadata = append(keysForMetadata, k) + var l int + _ = l + if m.Basic != nil { + l = m.Basic.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) - mapStringForMetadata := "map[string]string{" - for _, k := range keysForMetadata { - mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + if m.OAuthToken != nil { + l = 
m.OAuthToken.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForMetadata += "}" - s := strings.Join([]string{`&CalendarEventSource{`, - `Schedule:` + fmt.Sprintf("%v", this.Schedule) + `,`, - `Interval:` + fmt.Sprintf("%v", this.Interval) + `,`, - `ExclusionDates:` + fmt.Sprintf("%v", this.ExclusionDates) + `,`, - `Timezone:` + fmt.Sprintf("%v", this.Timezone) + `,`, - `UserPayload:` + valueToStringGenerated(this.UserPayload) + `,`, - `Metadata:` + mapStringForMetadata + `,`, - `Persistence:` + strings.Replace(this.Persistence.String(), "EventPersistence", "EventPersistence", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *CatchupConfiguration) String() string { - if this == nil { - return "nil" + +func (m *BitbucketBasicAuth) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&CatchupConfiguration{`, - `Enabled:` + fmt.Sprintf("%v", this.Enabled) + `,`, - `MaxDuration:` + fmt.Sprintf("%v", this.MaxDuration) + `,`, - `}`, - }, "") - return s -} -func (this *ConfigMapPersistence) String() string { - if this == nil { - return "nil" + var l int + _ = l + if m.Username != nil { + l = m.Username.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&ConfigMapPersistence{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `CreateIfNotExist:` + fmt.Sprintf("%v", this.CreateIfNotExist) + `,`, - `}`, - }, "") - return s + if m.Password != nil { + l = m.Password.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (this *EmitterEventSource) String() string { - if this == nil { - return "nil" + +func (m *BitbucketEventSource) Size() (n int) { + if m == nil { + return 0 } - keysForMetadata := make([]string, 0, len(this.Metadata)) - for k := range this.Metadata { - keysForMetadata = append(keysForMetadata, k) + var l int + _ = l + n += 2 + if len(m.Metadata) > 0 { + for k, v := range m.Metadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) - mapStringForMetadata := "map[string]string{" - for _, k := range keysForMetadata { - mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + if m.Webhook != nil { + l = m.Webhook.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForMetadata += "}" - s := strings.Join([]string{`&EmitterEventSource{`, - `Broker:` + fmt.Sprintf("%v", this.Broker) + `,`, - `ChannelKey:` + fmt.Sprintf("%v", this.ChannelKey) + `,`, - `ChannelName:` + fmt.Sprintf("%v", this.ChannelName) + `,`, - `Username:` + strings.Replace(fmt.Sprintf("%v", this.Username), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `Password:` + strings.Replace(fmt.Sprintf("%v", this.Password), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `ConnectionBackoff:` + strings.Replace(fmt.Sprintf("%v", this.ConnectionBackoff), "Backoff", "common.Backoff", 1) + `,`, - `JSONBody:` + fmt.Sprintf("%v", this.JSONBody) + `,`, - `TLS:` + strings.Replace(fmt.Sprintf("%v", this.TLS), "TLSConfig", "common.TLSConfig", 1) + `,`, - `Metadata:` + mapStringForMetadata + `,`, - `}`, - }, "") - return s -} -func (this *EventPersistence) String() string { - if this == nil { - return "nil" + if m.Auth != nil { + l = m.Auth.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&EventPersistence{`, - `Catchup:` + strings.Replace(this.Catchup.String(), "CatchupConfiguration", 
"CatchupConfiguration", 1) + `,`, - `ConfigMap:` + strings.Replace(this.ConfigMap.String(), "ConfigMapPersistence", "ConfigMapPersistence", 1) + `,`, - `}`, - }, "") - return s -} -func (this *EventSource) String() string { - if this == nil { - return "nil" + if len(m.Events) > 0 { + for _, s := range m.Events { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - s := strings.Join([]string{`&EventSource{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "EventSourceSpec", "EventSourceSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "EventSourceStatus", "EventSourceStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *EventSourceList) String() string { - if this == nil { - return "nil" + l = len(m.DeprecatedOwner) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DeprecatedProjectKey) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DeprecatedRepositorySlug) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Repositories) > 0 { + for _, e := range m.Repositories { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForItems := "[]EventSource{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "EventSource", "EventSource", 1), `&`, ``, 1) + "," + if m.Filter != nil { + l = m.Filter.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForItems += "}" - s := strings.Join([]string{`&EventSourceList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s + return n } -func (this *EventSourceSpec) String() string { - if this == nil { - return "nil" + +func (m *BitbucketRepository) Size() (n int) { + if m == nil { + return 0 } - keysForMinio := make([]string, 0, len(this.Minio)) - for k := range this.Minio { - keysForMinio = append(keysForMinio, k) + var l int + _ = l + l = len(m.Owner) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RepositorySlug) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BitbucketServerEventSource) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForMinio) - mapStringForMinio := "map[string]common.S3Artifact{" - for _, k := range keysForMinio { - mapStringForMinio += fmt.Sprintf("%v: %v,", k, this.Minio[k]) + var l int + _ = l + if m.Webhook != nil { + l = m.Webhook.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForMinio += "}" - keysForCalendar := make([]string, 0, len(this.Calendar)) - for k := range this.Calendar { - keysForCalendar = append(keysForCalendar, k) + l = len(m.DeprecatedProjectKey) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DeprecatedRepositorySlug) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Projects) > 0 { + for _, s := range m.Projects { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForCalendar) - mapStringForCalendar := "map[string]CalendarEventSource{" - for _, k := range keysForCalendar { - mapStringForCalendar += fmt.Sprintf("%v: %v,", k, this.Calendar[k]) + if len(m.Repositories) > 0 { + for _, e := range m.Repositories { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForCalendar += "}" - keysForFile := 
make([]string, 0, len(this.File)) - for k := range this.File { - keysForFile = append(keysForFile, k) + if len(m.Events) > 0 { + for _, s := range m.Events { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForFile) - mapStringForFile := "map[string]FileEventSource{" - for _, k := range keysForFile { - mapStringForFile += fmt.Sprintf("%v: %v,", k, this.File[k]) + n += 2 + if m.AccessToken != nil { + l = m.AccessToken.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForFile += "}" - keysForResource := make([]string, 0, len(this.Resource)) - for k := range this.Resource { - keysForResource = append(keysForResource, k) + if m.WebhookSecret != nil { + l = m.WebhookSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForResource) - mapStringForResource := "map[string]ResourceEventSource{" - for _, k := range keysForResource { - mapStringForResource += fmt.Sprintf("%v: %v,", k, this.Resource[k]) + l = len(m.BitbucketServerBaseURL) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if len(m.Metadata) > 0 { + for k, v := range m.Metadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - mapStringForResource += "}" - keysForWebhook := make([]string, 0, len(this.Webhook)) - for k := range this.Webhook { - keysForWebhook = append(keysForWebhook, k) + if m.Filter != nil { + l = m.Filter.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForWebhook) - mapStringForWebhook := "map[string]WebhookContext{" - for _, k := range keysForWebhook { - mapStringForWebhook += fmt.Sprintf("%v: %v,", k, this.Webhook[k]) + if m.TLS != nil { + l = m.TLS.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForWebhook += "}" - keysForAMQP := make([]string, 0, len(this.AMQP)) - for k := range this.AMQP { - keysForAMQP = append(keysForAMQP, k) + l = len(m.CheckInterval) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BitbucketServerRepository) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForAMQP) - mapStringForAMQP := "map[string]AMQPEventSource{" - for _, k := range keysForAMQP { - mapStringForAMQP += fmt.Sprintf("%v: %v,", k, this.AMQP[k]) + var l int + _ = l + l = len(m.ProjectKey) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RepositorySlug) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CalendarEventSource) Size() (n int) { + if m == nil { + return 0 } - mapStringForAMQP += "}" - keysForKafka := make([]string, 0, len(this.Kafka)) - for k := range this.Kafka { - keysForKafka = append(keysForKafka, k) + var l int + _ = l + l = len(m.Schedule) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Interval) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ExclusionDates) > 0 { + for _, s := range m.ExclusionDates { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForKafka) - mapStringForKafka := "map[string]KafkaEventSource{" - for _, k := range keysForKafka { - mapStringForKafka += fmt.Sprintf("%v: %v,", k, this.Kafka[k]) + l = len(m.Timezone) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Metadata) > 0 { + for k, v := range m.Metadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize 
+ 1 + sovGenerated(uint64(mapEntrySize)) + } } - mapStringForKafka += "}" - keysForMQTT := make([]string, 0, len(this.MQTT)) - for k := range this.MQTT { - keysForMQTT = append(keysForMQTT, k) + if m.Persistence != nil { + l = m.Persistence.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForMQTT) - mapStringForMQTT := "map[string]MQTTEventSource{" - for _, k := range keysForMQTT { - mapStringForMQTT += fmt.Sprintf("%v: %v,", k, this.MQTT[k]) + if m.Filter != nil { + l = m.Filter.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForMQTT += "}" - keysForNATS := make([]string, 0, len(this.NATS)) - for k := range this.NATS { - keysForNATS = append(keysForNATS, k) + return n +} + +func (m *CatchupConfiguration) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForNATS) - mapStringForNATS := "map[string]NATSEventsSource{" - for _, k := range keysForNATS { - mapStringForNATS += fmt.Sprintf("%v: %v,", k, this.NATS[k]) + var l int + _ = l + n += 2 + l = len(m.MaxDuration) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ConfigMapPersistence) Size() (n int) { + if m == nil { + return 0 } - mapStringForNATS += "}" - keysForSNS := make([]string, 0, len(this.SNS)) - for k := range this.SNS { - keysForSNS = append(keysForSNS, k) + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *EmitterEventSource) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForSNS) - mapStringForSNS := "map[string]SNSEventSource{" - for _, k := range keysForSNS { - mapStringForSNS += fmt.Sprintf("%v: %v,", k, this.SNS[k]) + var l int + _ = l + l = len(m.Broker) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ChannelKey) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ChannelName) + n += 1 + l + sovGenerated(uint64(l)) + if m.Username != nil { + l = m.Username.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForSNS += "}" - keysForSQS := make([]string, 0, len(this.SQS)) - for k := range this.SQS { - keysForSQS = append(keysForSQS, k) + if m.Password != nil { + l = m.Password.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForSQS) - mapStringForSQS := "map[string]SQSEventSource{" - for _, k := range keysForSQS { - mapStringForSQS += fmt.Sprintf("%v: %v,", k, this.SQS[k]) + if m.ConnectionBackoff != nil { + l = m.ConnectionBackoff.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForSQS += "}" - keysForPubSub := make([]string, 0, len(this.PubSub)) - for k := range this.PubSub { - keysForPubSub = append(keysForPubSub, k) + n += 2 + if m.TLS != nil { + l = m.TLS.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForPubSub) - mapStringForPubSub := "map[string]PubSubEventSource{" - for _, k := range keysForPubSub { - mapStringForPubSub += fmt.Sprintf("%v: %v,", k, this.PubSub[k]) + if len(m.Metadata) > 0 { + for k, v := range m.Metadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - mapStringForPubSub += "}" - keysForGithub := make([]string, 0, len(this.Github)) - for k := range this.Github { - keysForGithub = append(keysForGithub, k) + if m.Filter != nil { + l = m.Filter.Size() + n += 1 + l + sovGenerated(uint64(l)) } - 
github_com_gogo_protobuf_sortkeys.Strings(keysForGithub) - mapStringForGithub := "map[string]GithubEventSource{" - for _, k := range keysForGithub { - mapStringForGithub += fmt.Sprintf("%v: %v,", k, this.Github[k]) + return n +} + +func (m *EventPersistence) Size() (n int) { + if m == nil { + return 0 } - mapStringForGithub += "}" - keysForGitlab := make([]string, 0, len(this.Gitlab)) - for k := range this.Gitlab { - keysForGitlab = append(keysForGitlab, k) + var l int + _ = l + if m.Catchup != nil { + l = m.Catchup.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForGitlab) - mapStringForGitlab := "map[string]GitlabEventSource{" - for _, k := range keysForGitlab { - mapStringForGitlab += fmt.Sprintf("%v: %v,", k, this.Gitlab[k]) + if m.ConfigMap != nil { + l = m.ConfigMap.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForGitlab += "}" - keysForHDFS := make([]string, 0, len(this.HDFS)) - for k := range this.HDFS { - keysForHDFS = append(keysForHDFS, k) + return n +} + +func (m *EventSource) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForHDFS) - mapStringForHDFS := "map[string]HDFSEventSource{" - for _, k := range keysForHDFS { - mapStringForHDFS += fmt.Sprintf("%v: %v,", k, this.HDFS[k]) + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EventSourceFilter) Size() (n int) { + if m == nil { + return 0 } - mapStringForHDFS += "}" - keysForSlack := make([]string, 0, len(this.Slack)) - for k := range this.Slack { - keysForSlack = append(keysForSlack, k) + var l int + _ = l + l = len(m.Expression) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EventSourceList) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForSlack) - mapStringForSlack := "map[string]SlackEventSource{" - for _, k := range keysForSlack { - mapStringForSlack += fmt.Sprintf("%v: %v,", k, this.Slack[k]) + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForSlack += "}" - keysForStorageGrid := make([]string, 0, len(this.StorageGrid)) - for k := range this.StorageGrid { - keysForStorageGrid = append(keysForStorageGrid, k) + return n +} + +func (m *EventSourceSpec) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForStorageGrid) - mapStringForStorageGrid := "map[string]StorageGridEventSource{" - for _, k := range keysForStorageGrid { - mapStringForStorageGrid += fmt.Sprintf("%v: %v,", k, this.StorageGrid[k]) + var l int + _ = l + l = len(m.EventBusName) + n += 1 + l + sovGenerated(uint64(l)) + if m.Template != nil { + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForStorageGrid += "}" - keysForAzureEventsHub := make([]string, 0, len(this.AzureEventsHub)) - for k := range this.AzureEventsHub { - keysForAzureEventsHub = append(keysForAzureEventsHub, k) + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForAzureEventsHub) - mapStringForAzureEventsHub := "map[string]AzureEventsHubEventSource{" - for _, k := range keysForAzureEventsHub { - mapStringForAzureEventsHub += 
fmt.Sprintf("%v: %v,", k, this.AzureEventsHub[k]) + if len(m.Minio) > 0 { + for k, v := range m.Minio { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - mapStringForAzureEventsHub += "}" - keysForStripe := make([]string, 0, len(this.Stripe)) - for k := range this.Stripe { - keysForStripe = append(keysForStripe, k) + if len(m.Calendar) > 0 { + for k, v := range m.Calendar { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForStripe) - mapStringForStripe := "map[string]StripeEventSource{" - for _, k := range keysForStripe { - mapStringForStripe += fmt.Sprintf("%v: %v,", k, this.Stripe[k]) + if len(m.File) > 0 { + for k, v := range m.File { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - mapStringForStripe += "}" - keysForEmitter := make([]string, 0, len(this.Emitter)) - for k := range this.Emitter { - keysForEmitter = append(keysForEmitter, k) + if len(m.Resource) > 0 { + for k, v := range m.Resource { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForEmitter) - mapStringForEmitter := "map[string]EmitterEventSource{" - for _, k := range keysForEmitter { - mapStringForEmitter += fmt.Sprintf("%v: %v,", k, this.Emitter[k]) + if len(m.Webhook) > 0 { + for k, v := range m.Webhook { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - mapStringForEmitter += "}" - keysForRedis := make([]string, 0, len(this.Redis)) - for k := range this.Redis { - keysForRedis = append(keysForRedis, k) + if len(m.AMQP) > 0 { + for k, v := range m.AMQP { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForRedis) - mapStringForRedis := "map[string]RedisEventSource{" - for _, k := range keysForRedis { - mapStringForRedis += fmt.Sprintf("%v: %v,", k, this.Redis[k]) + if len(m.Kafka) > 0 { + for k, v := range m.Kafka { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - mapStringForRedis += "}" - keysForNSQ := make([]string, 0, len(this.NSQ)) - for k := range this.NSQ { - keysForNSQ = append(keysForNSQ, k) + if len(m.MQTT) > 0 { + for k, v := range m.MQTT { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForNSQ) - mapStringForNSQ := "map[string]NSQEventSource{" - for _, k := range keysForNSQ { - mapStringForNSQ += fmt.Sprintf("%v: %v,", k, this.NSQ[k]) + if len(m.NATS) > 0 { + for k, v := range m.NATS { + _ 
= k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - mapStringForNSQ += "}" - keysForPulsar := make([]string, 0, len(this.Pulsar)) - for k := range this.Pulsar { - keysForPulsar = append(keysForPulsar, k) + if len(m.SNS) > 0 { + for k, v := range m.SNS { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForPulsar) - mapStringForPulsar := "map[string]PulsarEventSource{" - for _, k := range keysForPulsar { - mapStringForPulsar += fmt.Sprintf("%v: %v,", k, this.Pulsar[k]) + if len(m.SQS) > 0 { + for k, v := range m.SQS { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - mapStringForPulsar += "}" - keysForGeneric := make([]string, 0, len(this.Generic)) - for k := range this.Generic { - keysForGeneric = append(keysForGeneric, k) + if len(m.PubSub) > 0 { + for k, v := range m.PubSub { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForGeneric) - mapStringForGeneric := "map[string]GenericEventSource{" - for _, k := range keysForGeneric { - mapStringForGeneric += fmt.Sprintf("%v: %v,", k, this.Generic[k]) + if len(m.Github) > 0 { + for k, v := range m.Github { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } } - mapStringForGeneric += "}" - s := strings.Join([]string{`&EventSourceSpec{`, - `EventBusName:` + fmt.Sprintf("%v", this.EventBusName) + `,`, - `Template:` + strings.Replace(this.Template.String(), "Template", "Template", 1) + `,`, - `Service:` + strings.Replace(this.Service.String(), "Service", "Service", 1) + `,`, - `DeprecatedReplica:` + valueToStringGenerated(this.DeprecatedReplica) + `,`, - `Minio:` + mapStringForMinio + `,`, - `Calendar:` + mapStringForCalendar + `,`, - `File:` + mapStringForFile + `,`, - `Resource:` + mapStringForResource + `,`, - `Webhook:` + mapStringForWebhook + `,`, - `AMQP:` + mapStringForAMQP + `,`, - `Kafka:` + mapStringForKafka + `,`, - `MQTT:` + mapStringForMQTT + `,`, - `NATS:` + mapStringForNATS + `,`, - `SNS:` + mapStringForSNS + `,`, - `SQS:` + mapStringForSQS + `,`, - `PubSub:` + mapStringForPubSub + `,`, - `Github:` + mapStringForGithub + `,`, - `Gitlab:` + mapStringForGitlab + `,`, - `HDFS:` + mapStringForHDFS + `,`, - `Slack:` + mapStringForSlack + `,`, - `StorageGrid:` + mapStringForStorageGrid + `,`, - `AzureEventsHub:` + mapStringForAzureEventsHub + `,`, - `Stripe:` + mapStringForStripe + `,`, - `Emitter:` + mapStringForEmitter + `,`, - `Redis:` + mapStringForRedis + `,`, - `NSQ:` + mapStringForNSQ + `,`, - `Pulsar:` + mapStringForPulsar + `,`, - `Generic:` + mapStringForGeneric + `,`, - `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `}`, - }, "") - return s -} -func (this *EventSourceStatus) String() string { - if this == nil { - return "nil" + if len(m.Gitlab) > 0 { + for k, v := range m.Gitlab { + _ = k + _ = v + l = v.Size() + mapEntrySize 
:= 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } } - s := strings.Join([]string{`&EventSourceStatus{`, - `Status:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "common.Status", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *FileEventSource) String() string { - if this == nil { - return "nil" + if len(m.HDFS) > 0 { + for k, v := range m.HDFS { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } } - keysForMetadata := make([]string, 0, len(this.Metadata)) - for k := range this.Metadata { - keysForMetadata = append(keysForMetadata, k) + if len(m.Slack) > 0 { + for k, v := range m.Slack { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) - mapStringForMetadata := "map[string]string{" - for _, k := range keysForMetadata { - mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + if len(m.StorageGrid) > 0 { + for k, v := range m.StorageGrid { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } } - mapStringForMetadata += "}" - s := strings.Join([]string{`&FileEventSource{`, - `EventType:` + fmt.Sprintf("%v", this.EventType) + `,`, - `WatchPathConfig:` + strings.Replace(strings.Replace(this.WatchPathConfig.String(), "WatchPathConfig", "WatchPathConfig", 1), `&`, ``, 1) + `,`, - `Polling:` + fmt.Sprintf("%v", this.Polling) + `,`, - `Metadata:` + mapStringForMetadata + `,`, - `}`, - }, "") - return s -} -func (this *GenericEventSource) String() string { - if this == nil { - return "nil" + if len(m.AzureEventsHub) > 0 { + for k, v := range m.AzureEventsHub { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } } - keysForMetadata := make([]string, 0, len(this.Metadata)) - for k := range this.Metadata { - keysForMetadata = append(keysForMetadata, k) + if len(m.Stripe) > 0 { + for k, v := range m.Stripe { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) - mapStringForMetadata := "map[string]string{" - for _, k := range keysForMetadata { - mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + if len(m.Emitter) > 0 { + for k, v := range m.Emitter { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } } - mapStringForMetadata += "}" - s := strings.Join([]string{`&GenericEventSource{`, - `URL:` + fmt.Sprintf("%v", this.URL) + `,`, - `Config:` + fmt.Sprintf("%v", this.Config) + `,`, - `Insecure:` + fmt.Sprintf("%v", this.Insecure) + `,`, - `JSONBody:` + fmt.Sprintf("%v", this.JSONBody) + `,`, - `Metadata:` + mapStringForMetadata + `,`, - `AuthSecret:` + strings.Replace(fmt.Sprintf("%v", 
this.AuthSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `}`, - }, "") - return s -} -func (this *GithubEventSource) String() string { - if this == nil { - return "nil" + if len(m.Redis) > 0 { + for k, v := range m.Redis { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } } - repeatedStringForRepositories := "[]OwnedRepositories{" - for _, f := range this.Repositories { - repeatedStringForRepositories += strings.Replace(strings.Replace(f.String(), "OwnedRepositories", "OwnedRepositories", 1), `&`, ``, 1) + "," + if len(m.NSQ) > 0 { + for k, v := range m.NSQ { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } } - repeatedStringForRepositories += "}" - keysForMetadata := make([]string, 0, len(this.Metadata)) - for k := range this.Metadata { - keysForMetadata = append(keysForMetadata, k) + if len(m.Pulsar) > 0 { + for k, v := range m.Pulsar { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) - mapStringForMetadata := "map[string]string{" - for _, k := range keysForMetadata { - mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + if len(m.Generic) > 0 { + for k, v := range m.Generic { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } } - mapStringForMetadata += "}" - s := strings.Join([]string{`&GithubEventSource{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Webhook:` + strings.Replace(this.Webhook.String(), "WebhookContext", "WebhookContext", 1) + `,`, - `DeprecatedOwner:` + fmt.Sprintf("%v", this.DeprecatedOwner) + `,`, - `DeprecatedRepository:` + fmt.Sprintf("%v", this.DeprecatedRepository) + `,`, - `Events:` + fmt.Sprintf("%v", this.Events) + `,`, - `APIToken:` + strings.Replace(fmt.Sprintf("%v", this.APIToken), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `WebhookSecret:` + strings.Replace(fmt.Sprintf("%v", this.WebhookSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `Insecure:` + fmt.Sprintf("%v", this.Insecure) + `,`, - `Active:` + fmt.Sprintf("%v", this.Active) + `,`, - `ContentType:` + fmt.Sprintf("%v", this.ContentType) + `,`, - `GithubBaseURL:` + fmt.Sprintf("%v", this.GithubBaseURL) + `,`, - `GithubUploadURL:` + fmt.Sprintf("%v", this.GithubUploadURL) + `,`, - `DeleteHookOnFinish:` + fmt.Sprintf("%v", this.DeleteHookOnFinish) + `,`, - `Metadata:` + mapStringForMetadata + `,`, - `Repositories:` + repeatedStringForRepositories + `,`, - `}`, - }, "") - return s -} -func (this *GitlabEventSource) String() string { - if this == nil { - return "nil" + if m.Replicas != nil { + n += 2 + sovGenerated(uint64(*m.Replicas)) } - keysForMetadata := make([]string, 0, len(this.Metadata)) - for k := range this.Metadata { - keysForMetadata = append(keysForMetadata, k) + if len(m.BitbucketServer) > 0 { + for k, v := range m.BitbucketServer { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + 
sovGenerated(uint64(mapEntrySize)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) - mapStringForMetadata := "map[string]string{" - for _, k := range keysForMetadata { - mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + if len(m.Bitbucket) > 0 { + for k, v := range m.Bitbucket { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } } - mapStringForMetadata += "}" - s := strings.Join([]string{`&GitlabEventSource{`, - `Webhook:` + strings.Replace(this.Webhook.String(), "WebhookContext", "WebhookContext", 1) + `,`, - `ProjectID:` + fmt.Sprintf("%v", this.ProjectID) + `,`, - `Events:` + fmt.Sprintf("%v", this.Events) + `,`, - `AccessToken:` + strings.Replace(fmt.Sprintf("%v", this.AccessToken), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `EnableSSLVerification:` + fmt.Sprintf("%v", this.EnableSSLVerification) + `,`, - `GitlabBaseURL:` + fmt.Sprintf("%v", this.GitlabBaseURL) + `,`, - `DeleteHookOnFinish:` + fmt.Sprintf("%v", this.DeleteHookOnFinish) + `,`, - `Metadata:` + mapStringForMetadata + `,`, - `}`, - }, "") - return s -} -func (this *HDFSEventSource) String() string { - if this == nil { - return "nil" + if len(m.RedisStream) > 0 { + for k, v := range m.RedisStream { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } } - keysForMetadata := make([]string, 0, len(this.Metadata)) - for k := range this.Metadata { - keysForMetadata = append(keysForMetadata, k) + if len(m.AzureServiceBus) > 0 { + for k, v := range m.AzureServiceBus { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) - mapStringForMetadata := "map[string]string{" - for _, k := range keysForMetadata { - mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + if len(m.AzureQueueStorage) > 0 { + for k, v := range m.AzureQueueStorage { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } } - mapStringForMetadata += "}" - s := strings.Join([]string{`&HDFSEventSource{`, - `WatchPathConfig:` + strings.Replace(strings.Replace(this.WatchPathConfig.String(), "WatchPathConfig", "WatchPathConfig", 1), `&`, ``, 1) + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `CheckInterval:` + fmt.Sprintf("%v", this.CheckInterval) + `,`, - `Addresses:` + fmt.Sprintf("%v", this.Addresses) + `,`, - `HDFSUser:` + fmt.Sprintf("%v", this.HDFSUser) + `,`, - `KrbCCacheSecret:` + strings.Replace(fmt.Sprintf("%v", this.KrbCCacheSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `KrbKeytabSecret:` + strings.Replace(fmt.Sprintf("%v", this.KrbKeytabSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `KrbUsername:` + fmt.Sprintf("%v", this.KrbUsername) + `,`, - `KrbRealm:` + fmt.Sprintf("%v", this.KrbRealm) + `,`, - `KrbConfigConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.KrbConfigConfigMap), "ConfigMapKeySelector", "v1.ConfigMapKeySelector", 1) + `,`, - `KrbServicePrincipalName:` + fmt.Sprintf("%v", this.KrbServicePrincipalName) + `,`, - `Metadata:` + 
mapStringForMetadata + `,`, - `}`, - }, "") - return s + if len(m.SFTP) > 0 { + for k, v := range m.SFTP { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Gerrit) > 0 { + for k, v := range m.Gerrit { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } + } + return n } -func (this *KafkaConsumerGroup) String() string { - if this == nil { - return "nil" + +func (m *EventSourceStatus) Size() (n int) { + if m == nil { + return 0 } - s := strings.Join([]string{`&KafkaConsumerGroup{`, - `GroupName:` + fmt.Sprintf("%v", this.GroupName) + `,`, - `Oldest:` + fmt.Sprintf("%v", this.Oldest) + `,`, - `RebalanceStrategy:` + fmt.Sprintf("%v", this.RebalanceStrategy) + `,`, - `}`, - }, "") - return s + var l int + _ = l + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (this *KafkaEventSource) String() string { - if this == nil { - return "nil" + +func (m *FileEventSource) Size() (n int) { + if m == nil { + return 0 } - keysForMetadata := make([]string, 0, len(this.Metadata)) - for k := range this.Metadata { - keysForMetadata = append(keysForMetadata, k) + var l int + _ = l + l = len(m.EventType) + n += 1 + l + sovGenerated(uint64(l)) + l = m.WatchPathConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if len(m.Metadata) > 0 { + for k, v := range m.Metadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) - mapStringForMetadata := "map[string]string{" - for _, k := range keysForMetadata { - mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + if m.Filter != nil { + l = m.Filter.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForMetadata += "}" - s := strings.Join([]string{`&KafkaEventSource{`, - `URL:` + fmt.Sprintf("%v", this.URL) + `,`, - `Partition:` + fmt.Sprintf("%v", this.Partition) + `,`, - `Topic:` + fmt.Sprintf("%v", this.Topic) + `,`, - `ConnectionBackoff:` + strings.Replace(fmt.Sprintf("%v", this.ConnectionBackoff), "Backoff", "common.Backoff", 1) + `,`, - `TLS:` + strings.Replace(fmt.Sprintf("%v", this.TLS), "TLSConfig", "common.TLSConfig", 1) + `,`, - `JSONBody:` + fmt.Sprintf("%v", this.JSONBody) + `,`, - `Metadata:` + mapStringForMetadata + `,`, - `ConsumerGroup:` + strings.Replace(this.ConsumerGroup.String(), "KafkaConsumerGroup", "KafkaConsumerGroup", 1) + `,`, - `LimitEventsPerSecond:` + fmt.Sprintf("%v", this.LimitEventsPerSecond) + `,`, - `Version:` + fmt.Sprintf("%v", this.Version) + `,`, - `SASL:` + strings.Replace(fmt.Sprintf("%v", this.SASL), "SASLConfig", "common.SASLConfig", 1) + `,`, - `}`, - }, "") - return s + return n } -func (this *MQTTEventSource) String() string { - if this == nil { - return "nil" - } - keysForMetadata := make([]string, 0, len(this.Metadata)) - for k := range this.Metadata { - keysForMetadata = append(keysForMetadata, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) - mapStringForMetadata := "map[string]string{" - for _, k := range keysForMetadata { - mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + +func (m *GenericEventSource) Size() (n int) { + if m == nil { 
+		return 0
 	}
-	mapStringForMetadata += "}"
-	s := strings.Join([]string{`&MQTTEventSource{`,
-		`URL:` + fmt.Sprintf("%v", this.URL) + `,`,
-		`Topic:` + fmt.Sprintf("%v", this.Topic) + `,`,
-		`ClientID:` + fmt.Sprintf("%v", this.ClientID) + `,`,
-		`ConnectionBackoff:` + strings.Replace(fmt.Sprintf("%v", this.ConnectionBackoff), "Backoff", "common.Backoff", 1) + `,`,
-		`JSONBody:` + fmt.Sprintf("%v", this.JSONBody) + `,`,
-		`TLS:` + strings.Replace(fmt.Sprintf("%v", this.TLS), "TLSConfig", "common.TLSConfig", 1) + `,`,
-		`Metadata:` + mapStringForMetadata + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func (this *NATSAuth) String() string {
-	if this == nil {
-		return "nil"
-	}
-	s := strings.Join([]string{`&NATSAuth{`,
-		`Basic:` + strings.Replace(fmt.Sprintf("%v", this.Basic), "BasicAuth", "common.BasicAuth", 1) + `,`,
-		`Token:` + strings.Replace(fmt.Sprintf("%v", this.Token), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`,
-		`NKey:` + strings.Replace(fmt.Sprintf("%v", this.NKey), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`,
-		`Credential:` + strings.Replace(fmt.Sprintf("%v", this.Credential), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func (this *NATSEventsSource) String() string {
-	if this == nil {
-		return "nil"
+	var l int
+	_ = l
+	l = len(m.URL)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Config)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	n += 2
+	if len(m.Metadata) > 0 {
+		for k, v := range m.Metadata {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
 	}
-	keysForMetadata := make([]string, 0, len(this.Metadata))
-	for k := range this.Metadata {
-		keysForMetadata = append(keysForMetadata, k)
+	if m.AuthSecret != nil {
+		l = m.AuthSecret.Size()
+		n += 1 + l + sovGenerated(uint64(l))
 	}
-	github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata)
-	mapStringForMetadata := "map[string]string{"
-	for _, k := range keysForMetadata {
-		mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k])
+	if m.Filter != nil {
+		l = m.Filter.Size()
+		n += 1 + l + sovGenerated(uint64(l))
 	}
-	mapStringForMetadata += "}"
-	s := strings.Join([]string{`&NATSEventsSource{`,
-		`URL:` + fmt.Sprintf("%v", this.URL) + `,`,
-		`Subject:` + fmt.Sprintf("%v", this.Subject) + `,`,
-		`ConnectionBackoff:` + strings.Replace(fmt.Sprintf("%v", this.ConnectionBackoff), "Backoff", "common.Backoff", 1) + `,`,
-		`JSONBody:` + fmt.Sprintf("%v", this.JSONBody) + `,`,
-		`TLS:` + strings.Replace(fmt.Sprintf("%v", this.TLS), "TLSConfig", "common.TLSConfig", 1) + `,`,
-		`Metadata:` + mapStringForMetadata + `,`,
-		`Auth:` + strings.Replace(this.Auth.String(), "NATSAuth", "NATSAuth", 1) + `,`,
-		`}`,
-	}, "")
-	return s
+	return n
 }
-func (this *NSQEventSource) String() string {
-	if this == nil {
-		return "nil"
+
+func (m *GerritEventSource) Size() (n int) {
+	if m == nil {
+		return 0
 	}
-	keysForMetadata := make([]string, 0, len(this.Metadata))
-	for k := range this.Metadata {
-		keysForMetadata = append(keysForMetadata, k)
+	var l int
+	_ = l
+	if m.Webhook != nil {
+		l = m.Webhook.Size()
+		n += 1 + l + sovGenerated(uint64(l))
 	}
-	github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata)
-	mapStringForMetadata := "map[string]string{"
-	for _, k := range keysForMetadata {
-		mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k])
+	l = len(m.HookName)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Events) > 0 {
+ for _, s := range m.Events { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForMetadata += "}" - s := strings.Join([]string{`&NSQEventSource{`, - `HostAddress:` + fmt.Sprintf("%v", this.HostAddress) + `,`, - `Topic:` + fmt.Sprintf("%v", this.Topic) + `,`, - `Channel:` + fmt.Sprintf("%v", this.Channel) + `,`, - `ConnectionBackoff:` + strings.Replace(fmt.Sprintf("%v", this.ConnectionBackoff), "Backoff", "common.Backoff", 1) + `,`, - `JSONBody:` + fmt.Sprintf("%v", this.JSONBody) + `,`, - `TLS:` + strings.Replace(fmt.Sprintf("%v", this.TLS), "TLSConfig", "common.TLSConfig", 1) + `,`, - `Metadata:` + mapStringForMetadata + `,`, - `}`, - }, "") - return s -} -func (this *OwnedRepositories) String() string { - if this == nil { - return "nil" + if m.Auth != nil { + l = m.Auth.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&OwnedRepositories{`, - `Owner:` + fmt.Sprintf("%v", this.Owner) + `,`, - `Names:` + fmt.Sprintf("%v", this.Names) + `,`, - `}`, - }, "") - return s -} -func (this *PubSubEventSource) String() string { - if this == nil { - return "nil" + l = len(m.GerritBaseURL) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if len(m.Metadata) > 0 { + for k, v := range m.Metadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - keysForMetadata := make([]string, 0, len(this.Metadata)) - for k := range this.Metadata { - keysForMetadata = append(keysForMetadata, k) + if len(m.Projects) > 0 { + for _, s := range m.Projects { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) - mapStringForMetadata := "map[string]string{" - for _, k := range keysForMetadata { - mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + n += 2 + if m.Filter != nil { + l = m.Filter.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForMetadata += "}" - s := strings.Join([]string{`&PubSubEventSource{`, - `ProjectID:` + fmt.Sprintf("%v", this.ProjectID) + `,`, - `TopicProjectID:` + fmt.Sprintf("%v", this.TopicProjectID) + `,`, - `Topic:` + fmt.Sprintf("%v", this.Topic) + `,`, - `SubscriptionID:` + fmt.Sprintf("%v", this.SubscriptionID) + `,`, - `CredentialSecret:` + strings.Replace(fmt.Sprintf("%v", this.CredentialSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `DeleteSubscriptionOnFinish:` + fmt.Sprintf("%v", this.DeleteSubscriptionOnFinish) + `,`, - `JSONBody:` + fmt.Sprintf("%v", this.JSONBody) + `,`, - `DeprecatedCredentialsFile:` + fmt.Sprintf("%v", this.DeprecatedCredentialsFile) + `,`, - `Metadata:` + mapStringForMetadata + `,`, - `}`, - }, "") - return s + return n } -func (this *PulsarEventSource) String() string { - if this == nil { - return "nil" - } - keysForMetadata := make([]string, 0, len(this.Metadata)) - for k := range this.Metadata { - keysForMetadata = append(keysForMetadata, k) + +func (m *GithubAppCreds) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) - mapStringForMetadata := "map[string]string{" - for _, k := range keysForMetadata { - mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + var l int + _ = l + if m.PrivateKey != nil { + l = m.PrivateKey.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForMetadata += "}" - s := strings.Join([]string{`&PulsarEventSource{`, - `Topics:` + fmt.Sprintf("%v", this.Topics) + 
`,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `URL:` + fmt.Sprintf("%v", this.URL) + `,`, - `TLSTrustCertsSecret:` + strings.Replace(fmt.Sprintf("%v", this.TLSTrustCertsSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `TLSAllowInsecureConnection:` + fmt.Sprintf("%v", this.TLSAllowInsecureConnection) + `,`, - `TLSValidateHostname:` + fmt.Sprintf("%v", this.TLSValidateHostname) + `,`, - `TLS:` + strings.Replace(fmt.Sprintf("%v", this.TLS), "TLSConfig", "common.TLSConfig", 1) + `,`, - `ConnectionBackoff:` + strings.Replace(fmt.Sprintf("%v", this.ConnectionBackoff), "Backoff", "common.Backoff", 1) + `,`, - `JSONBody:` + fmt.Sprintf("%v", this.JSONBody) + `,`, - `Metadata:` + mapStringForMetadata + `,`, - `}`, - }, "") - return s + n += 1 + sovGenerated(uint64(m.AppID)) + n += 1 + sovGenerated(uint64(m.InstallationID)) + return n } -func (this *RedisEventSource) String() string { - if this == nil { - return "nil" - } - keysForMetadata := make([]string, 0, len(this.Metadata)) - for k := range this.Metadata { - keysForMetadata = append(keysForMetadata, k) + +func (m *GithubEventSource) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) - mapStringForMetadata := "map[string]string{" - for _, k := range keysForMetadata { - mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ID)) + if m.Webhook != nil { + l = m.Webhook.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForMetadata += "}" - s := strings.Join([]string{`&RedisEventSource{`, - `HostAddress:` + fmt.Sprintf("%v", this.HostAddress) + `,`, - `Password:` + strings.Replace(fmt.Sprintf("%v", this.Password), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `DB:` + fmt.Sprintf("%v", this.DB) + `,`, - `Channels:` + fmt.Sprintf("%v", this.Channels) + `,`, - `TLS:` + strings.Replace(fmt.Sprintf("%v", this.TLS), "TLSConfig", "common.TLSConfig", 1) + `,`, - `Metadata:` + mapStringForMetadata + `,`, - `}`, - }, "") - return s -} -func (this *ResourceEventSource) String() string { - if this == nil { - return "nil" + l = len(m.DeprecatedOwner) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DeprecatedRepository) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Events) > 0 { + for _, s := range m.Events { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - keysForMetadata := make([]string, 0, len(this.Metadata)) - for k := range this.Metadata { - keysForMetadata = append(keysForMetadata, k) + if m.APIToken != nil { + l = m.APIToken.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) - mapStringForMetadata := "map[string]string{" - for _, k := range keysForMetadata { - mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + if m.WebhookSecret != nil { + l = m.WebhookSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForMetadata += "}" - s := strings.Join([]string{`&ResourceEventSource{`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Filter:` + strings.Replace(this.Filter.String(), "ResourceFilter", "ResourceFilter", 1) + `,`, - `GroupVersionResource:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.GroupVersionResource), "GroupVersionResource", "v11.GroupVersionResource", 1), `&`, ``, 1) + `,`, - `EventTypes:` + fmt.Sprintf("%v", this.EventTypes) + `,`, - `Metadata:` + mapStringForMetadata + `,`, - `}`, - }, "") - 
return s -} -func (this *ResourceFilter) String() string { - if this == nil { - return "nil" - } - repeatedStringForLabels := "[]Selector{" - for _, f := range this.Labels { - repeatedStringForLabels += strings.Replace(strings.Replace(f.String(), "Selector", "Selector", 1), `&`, ``, 1) + "," + n += 2 + n += 2 + l = len(m.ContentType) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.GithubBaseURL) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.GithubUploadURL) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if len(m.Metadata) > 0 { + for k, v := range m.Metadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - repeatedStringForLabels += "}" - repeatedStringForFields := "[]Selector{" - for _, f := range this.Fields { - repeatedStringForFields += strings.Replace(strings.Replace(f.String(), "Selector", "Selector", 1), `&`, ``, 1) + "," + if len(m.Repositories) > 0 { + for _, e := range m.Repositories { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForFields += "}" - s := strings.Join([]string{`&ResourceFilter{`, - `Prefix:` + fmt.Sprintf("%v", this.Prefix) + `,`, - `Labels:` + repeatedStringForLabels + `,`, - `Fields:` + repeatedStringForFields + `,`, - `CreatedBy:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CreatedBy), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, - `AfterStart:` + fmt.Sprintf("%v", this.AfterStart) + `,`, - `}`, - }, "") - return s -} -func (this *SNSEventSource) String() string { - if this == nil { - return "nil" + if len(m.Organizations) > 0 { + for _, s := range m.Organizations { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } } - keysForMetadata := make([]string, 0, len(this.Metadata)) - for k := range this.Metadata { - keysForMetadata = append(keysForMetadata, k) + if m.GithubApp != nil { + l = m.GithubApp.Size() + n += 2 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) - mapStringForMetadata := "map[string]string{" - for _, k := range keysForMetadata { - mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + if m.Filter != nil { + l = m.Filter.Size() + n += 2 + l + sovGenerated(uint64(l)) } - mapStringForMetadata += "}" - s := strings.Join([]string{`&SNSEventSource{`, - `Webhook:` + strings.Replace(this.Webhook.String(), "WebhookContext", "WebhookContext", 1) + `,`, - `TopicArn:` + fmt.Sprintf("%v", this.TopicArn) + `,`, - `AccessKey:` + strings.Replace(fmt.Sprintf("%v", this.AccessKey), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `SecretKey:` + strings.Replace(fmt.Sprintf("%v", this.SecretKey), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `Region:` + fmt.Sprintf("%v", this.Region) + `,`, - `RoleARN:` + fmt.Sprintf("%v", this.RoleARN) + `,`, - `Metadata:` + mapStringForMetadata + `,`, - `ValidateSignature:` + fmt.Sprintf("%v", this.ValidateSignature) + `,`, - `}`, - }, "") - return s + l = m.PayloadEnrichment.Size() + n += 2 + l + sovGenerated(uint64(l)) + return n } -func (this *SQSEventSource) String() string { - if this == nil { - return "nil" + +func (m *GitlabEventSource) Size() (n int) { + if m == nil { + return 0 } - keysForMetadata := make([]string, 0, len(this.Metadata)) - for k := range this.Metadata { - keysForMetadata = append(keysForMetadata, k) + var l int + _ = l + if m.Webhook != nil { + l = m.Webhook.Size() + n += 1 + l + sovGenerated(uint64(l)) } - 
github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) - mapStringForMetadata := "map[string]string{" - for _, k := range keysForMetadata { - mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + l = len(m.DeprecatedProjectID) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Events) > 0 { + for _, s := range m.Events { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForMetadata += "}" - s := strings.Join([]string{`&SQSEventSource{`, - `AccessKey:` + strings.Replace(fmt.Sprintf("%v", this.AccessKey), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `SecretKey:` + strings.Replace(fmt.Sprintf("%v", this.SecretKey), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `Region:` + fmt.Sprintf("%v", this.Region) + `,`, - `Queue:` + fmt.Sprintf("%v", this.Queue) + `,`, - `WaitTimeSeconds:` + fmt.Sprintf("%v", this.WaitTimeSeconds) + `,`, - `RoleARN:` + fmt.Sprintf("%v", this.RoleARN) + `,`, - `JSONBody:` + fmt.Sprintf("%v", this.JSONBody) + `,`, - `QueueAccountID:` + fmt.Sprintf("%v", this.QueueAccountID) + `,`, - `Metadata:` + mapStringForMetadata + `,`, - `}`, - }, "") - return s -} -func (this *Selector) String() string { - if this == nil { - return "nil" + if m.AccessToken != nil { + l = m.AccessToken.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&Selector{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Operation:` + fmt.Sprintf("%v", this.Operation) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `}`, - }, "") - return s -} -func (this *Service) String() string { - if this == nil { - return "nil" + n += 2 + l = len(m.GitlabBaseURL) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if len(m.Metadata) > 0 { + for k, v := range m.Metadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - repeatedStringForPorts := "[]ServicePort{" - for _, f := range this.Ports { - repeatedStringForPorts += fmt.Sprintf("%v", f) + "," + if len(m.Projects) > 0 { + for _, s := range m.Projects { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - repeatedStringForPorts += "}" - s := strings.Join([]string{`&Service{`, - `Ports:` + repeatedStringForPorts + `,`, - `ClusterIP:` + fmt.Sprintf("%v", this.ClusterIP) + `,`, - `}`, - }, "") - return s -} -func (this *SlackEventSource) String() string { - if this == nil { - return "nil" + if m.SecretToken != nil { + l = m.SecretToken.Size() + n += 1 + l + sovGenerated(uint64(l)) } - keysForMetadata := make([]string, 0, len(this.Metadata)) - for k := range this.Metadata { - keysForMetadata = append(keysForMetadata, k) + if m.Filter != nil { + l = m.Filter.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) - mapStringForMetadata := "map[string]string{" - for _, k := range keysForMetadata { - mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForMetadata += "}" - s := strings.Join([]string{`&SlackEventSource{`, - `SigningSecret:` + strings.Replace(fmt.Sprintf("%v", this.SigningSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `Token:` + strings.Replace(fmt.Sprintf("%v", this.Token), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `Webhook:` + strings.Replace(this.Webhook.String(), "WebhookContext", 
"WebhookContext", 1) + `,`, - `Metadata:` + mapStringForMetadata + `,`, - `}`, - }, "") - return s + return n } -func (this *StorageGridEventSource) String() string { - if this == nil { - return "nil" - } - keysForMetadata := make([]string, 0, len(this.Metadata)) - for k := range this.Metadata { - keysForMetadata = append(keysForMetadata, k) + +func (m *HDFSEventSource) Size() (n int) { + if m == nil { + return 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) - mapStringForMetadata := "map[string]string{" - for _, k := range keysForMetadata { - mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + var l int + _ = l + l = m.WatchPathConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.CheckInterval) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Addresses) > 0 { + for _, s := range m.Addresses { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - mapStringForMetadata += "}" - s := strings.Join([]string{`&StorageGridEventSource{`, - `Webhook:` + strings.Replace(this.Webhook.String(), "WebhookContext", "WebhookContext", 1) + `,`, - `Events:` + fmt.Sprintf("%v", this.Events) + `,`, - `Filter:` + strings.Replace(this.Filter.String(), "StorageGridFilter", "StorageGridFilter", 1) + `,`, - `TopicArn:` + fmt.Sprintf("%v", this.TopicArn) + `,`, - `Bucket:` + fmt.Sprintf("%v", this.Bucket) + `,`, - `Region:` + fmt.Sprintf("%v", this.Region) + `,`, - `AuthToken:` + strings.Replace(fmt.Sprintf("%v", this.AuthToken), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `APIURL:` + fmt.Sprintf("%v", this.APIURL) + `,`, - `Metadata:` + mapStringForMetadata + `,`, - `}`, - }, "") - return s -} -func (this *StorageGridFilter) String() string { - if this == nil { - return "nil" + l = len(m.HDFSUser) + n += 1 + l + sovGenerated(uint64(l)) + if m.KrbCCacheSecret != nil { + l = m.KrbCCacheSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) } - s := strings.Join([]string{`&StorageGridFilter{`, - `Prefix:` + fmt.Sprintf("%v", this.Prefix) + `,`, - `Suffix:` + fmt.Sprintf("%v", this.Suffix) + `,`, - `}`, - }, "") - return s -} -func (this *StripeEventSource) String() string { - if this == nil { - return "nil" + if m.KrbKeytabSecret != nil { + l = m.KrbKeytabSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) } - keysForMetadata := make([]string, 0, len(this.Metadata)) - for k := range this.Metadata { - keysForMetadata = append(keysForMetadata, k) + l = len(m.KrbUsername) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KrbRealm) + n += 1 + l + sovGenerated(uint64(l)) + if m.KrbConfigConfigMap != nil { + l = m.KrbConfigConfigMap.Size() + n += 1 + l + sovGenerated(uint64(l)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) - mapStringForMetadata := "map[string]string{" - for _, k := range keysForMetadata { - mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + l = len(m.KrbServicePrincipalName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Metadata) > 0 { + for k, v := range m.Metadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - mapStringForMetadata += "}" - s := strings.Join([]string{`&StripeEventSource{`, - `Webhook:` + strings.Replace(this.Webhook.String(), "WebhookContext", "WebhookContext", 1) + `,`, - `CreateWebhook:` + fmt.Sprintf("%v", this.CreateWebhook) + `,`, - `APIKey:` + strings.Replace(fmt.Sprintf("%v", 
this.APIKey), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, - `EventFilter:` + fmt.Sprintf("%v", this.EventFilter) + `,`, - `Metadata:` + mapStringForMetadata + `,`, - `}`, - }, "") - return s + if m.Filter != nil { + l = m.Filter.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (this *Template) String() string { - if this == nil { - return "nil" + +func (m *KafkaConsumerGroup) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForVolumes := "[]Volume{" - for _, f := range this.Volumes { - repeatedStringForVolumes += fmt.Sprintf("%v", f) + "," + var l int + _ = l + l = len(m.GroupName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.RebalanceStrategy) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *KafkaEventSource) Size() (n int) { + if m == nil { + return 0 } - repeatedStringForVolumes += "}" - repeatedStringForTolerations := "[]Toleration{" - for _, f := range this.Tolerations { - repeatedStringForTolerations += fmt.Sprintf("%v", f) + "," + var l int + _ = l + l = len(m.URL) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Partition) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Topic) + n += 1 + l + sovGenerated(uint64(l)) + if m.ConnectionBackoff != nil { + l = m.ConnectionBackoff.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForTolerations += "}" - repeatedStringForImagePullSecrets := "[]LocalObjectReference{" - for _, f := range this.ImagePullSecrets { - repeatedStringForImagePullSecrets += fmt.Sprintf("%v", f) + "," + if m.TLS != nil { + l = m.TLS.Size() + n += 1 + l + sovGenerated(uint64(l)) } - repeatedStringForImagePullSecrets += "}" - keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) - for k := range this.NodeSelector { - keysForNodeSelector = append(keysForNodeSelector, k) + n += 2 + if len(m.Metadata) > 0 { + for k, v := range m.Metadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) - mapStringForNodeSelector := "map[string]string{" - for _, k := range keysForNodeSelector { - mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + if m.ConsumerGroup != nil { + l = m.ConsumerGroup.Size() + n += 1 + l + sovGenerated(uint64(l)) } - mapStringForNodeSelector += "}" - s := strings.Join([]string{`&Template{`, - `Metadata:` + strings.Replace(fmt.Sprintf("%v", this.Metadata), "Metadata", "common.Metadata", 1) + `,`, - `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`, - `Container:` + strings.Replace(fmt.Sprintf("%v", this.Container), "Container", "v1.Container", 1) + `,`, - `Volumes:` + repeatedStringForVolumes + `,`, - `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "PodSecurityContext", "v1.PodSecurityContext", 1) + `,`, - `Affinity:` + strings.Replace(fmt.Sprintf("%v", this.Affinity), "Affinity", "v1.Affinity", 1) + `,`, - `Tolerations:` + repeatedStringForTolerations + `,`, - `NodeSelector:` + mapStringForNodeSelector + `,`, - `ImagePullSecrets:` + repeatedStringForImagePullSecrets + `,`, - `PriorityClassName:` + fmt.Sprintf("%v", this.PriorityClassName) + `,`, - `Priority:` + valueToStringGenerated(this.Priority) + `,`, - `}`, - }, "") - return s + n += 1 + sovGenerated(uint64(m.LimitEventsPerSecond)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + if m.SASL != nil { + l = 
m.SASL.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.Filter != nil {
+		l = m.Filter.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	l = len(m.Config)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
 }
-func (this *WatchPathConfig) String() string {
-	if this == nil {
-		return "nil"
+
+func (m *MQTTEventSource) Size() (n int) {
+	if m == nil {
+		return 0
 	}
-	s := strings.Join([]string{`&WatchPathConfig{`,
-		`Directory:` + fmt.Sprintf("%v", this.Directory) + `,`,
-		`Path:` + fmt.Sprintf("%v", this.Path) + `,`,
-		`PathRegexp:` + fmt.Sprintf("%v", this.PathRegexp) + `,`,
-		`}`,
-	}, "")
-	return s
+	var l int
+	_ = l
+	l = len(m.URL)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Topic)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.ClientID)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.ConnectionBackoff != nil {
+		l = m.ConnectionBackoff.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	n += 2
+	if m.TLS != nil {
+		l = m.TLS.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if len(m.Metadata) > 0 {
+		for k, v := range m.Metadata {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if m.Filter != nil {
+		l = m.Filter.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.Auth != nil {
+		l = m.Auth.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
 }
-func (this *WebhookContext) String() string {
-	if this == nil {
-		return "nil"
+
+func (m *NATSAuth) Size() (n int) {
+	if m == nil {
+		return 0
 	}
-	keysForMetadata := make([]string, 0, len(this.Metadata))
-	for k := range this.Metadata {
-		keysForMetadata = append(keysForMetadata, k)
+	var l int
+	_ = l
+	if m.Basic != nil {
+		l = m.Basic.Size()
+		n += 1 + l + sovGenerated(uint64(l))
 	}
-	github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata)
-	mapStringForMetadata := "map[string]string{"
-	for _, k := range keysForMetadata {
-		mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k])
+	if m.Token != nil {
+		l = m.Token.Size()
+		n += 1 + l + sovGenerated(uint64(l))
 	}
-	mapStringForMetadata += "}"
-	s := strings.Join([]string{`&WebhookContext{`,
-		`Endpoint:` + fmt.Sprintf("%v", this.Endpoint) + `,`,
-		`Method:` + fmt.Sprintf("%v", this.Method) + `,`,
-		`Port:` + fmt.Sprintf("%v", this.Port) + `,`,
-		`URL:` + fmt.Sprintf("%v", this.URL) + `,`,
-		`ServerCertSecret:` + strings.Replace(fmt.Sprintf("%v", this.ServerCertSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`,
-		`ServerKeySecret:` + strings.Replace(fmt.Sprintf("%v", this.ServerKeySecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`,
-		`Metadata:` + mapStringForMetadata + `,`,
-		`AuthSecret:` + strings.Replace(fmt.Sprintf("%v", this.AuthSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`,
-		`DeprecatedServerCertPath:` + fmt.Sprintf("%v", this.DeprecatedServerCertPath) + `,`,
-		`DeprecatedServerKeyPath:` + fmt.Sprintf("%v", this.DeprecatedServerKeyPath) + `,`,
-		`}`,
-	}, "")
-	return s
+	if m.NKey != nil {
+		l = m.NKey.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.Credential != nil {
+		l = m.Credential.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
 }
-func valueToStringGenerated(v interface{}) string {
-	rv := reflect.ValueOf(v)
-	if rv.IsNil() {
-		return "nil"
+
+func (m *NATSEventsSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.URL)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Subject)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.ConnectionBackoff != nil {
+		l = m.ConnectionBackoff.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	n += 2
+	if m.TLS != nil {
+		l = m.TLS.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if len(m.Metadata) > 0 {
+		for k, v := range m.Metadata {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if m.Auth != nil {
+		l = m.Auth.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.Filter != nil {
+		l = m.Filter.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.Queue != nil {
+		l = len(*m.Queue)
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *NSQEventSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.HostAddress)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Topic)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Channel)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.ConnectionBackoff != nil {
+		l = m.ConnectionBackoff.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	n += 2
+	if m.TLS != nil {
+		l = m.TLS.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if len(m.Metadata) > 0 {
+		for k, v := range m.Metadata {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if m.Filter != nil {
+		l = m.Filter.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *OwnedRepositories) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Owner)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Names) > 0 {
+		for _, s := range m.Names {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *PayloadEnrichmentFlags) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 2
+	return n
+}
+
+func (m *PubSubEventSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.ProjectID)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.TopicProjectID)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Topic)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.SubscriptionID)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.CredentialSecret != nil {
+		l = m.CredentialSecret.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	n += 2
+	n += 2
+	if len(m.Metadata) > 0 {
+		for k, v := range m.Metadata {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v)))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
+	if m.Filter != nil {
+		l = m.Filter.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *PulsarEventSource) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Topics) > 0 {
+		for _, s := range m.Topics {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.Type)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.URL)
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.TLSTrustCertsSecret != nil {
+		l = m.TLSTrustCertsSecret.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	n += 2
+	n += 2
+	if m.TLS != nil {
+		l = m.TLS.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.ConnectionBackoff != nil {
+		l = m.ConnectionBackoff.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	n += 2
+	if len(m.Metadata) > 0 {
+		for k, v := range m.Metadata {
+			_ = k
+			_
= v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.AuthTokenSecret != nil { + l = m.AuthTokenSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Filter != nil { + l = m.Filter.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AuthAthenzParams) > 0 { + for k, v := range m.AuthAthenzParams { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.AuthAthenzSecret != nil { + l = m.AuthAthenzSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RedisEventSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HostAddress) + n += 1 + l + sovGenerated(uint64(l)) + if m.Password != nil { + l = m.Password.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.DB)) + if len(m.Channels) > 0 { + for _, s := range m.Channels { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.TLS != nil { + l = m.TLS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Metadata) > 0 { + for k, v := range m.Metadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.Filter != nil { + l = m.Filter.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + l = len(m.Username) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RedisStreamEventSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HostAddress) + n += 1 + l + sovGenerated(uint64(l)) + if m.Password != nil { + l = m.Password.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.DB)) + if len(m.Streams) > 0 { + for _, s := range m.Streams { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 1 + sovGenerated(uint64(m.MaxMsgCountPerRead)) + l = len(m.ConsumerGroup) + n += 1 + l + sovGenerated(uint64(l)) + if m.TLS != nil { + l = m.TLS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Metadata) > 0 { + for k, v := range m.Metadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.Filter != nil { + l = m.Filter.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Username) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceEventSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + if m.Filter != nil { + l = m.Filter.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.GroupVersionResource.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.EventTypes) > 0 { + for _, s := range m.EventTypes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Metadata) > 0 { + for k, v := range m.Metadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.Cluster) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m 
*ResourceFilter) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Prefix) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Fields) > 0 { + for _, e := range m.Fields { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.CreatedBy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *SFTPEventSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.EventType) + n += 1 + l + sovGenerated(uint64(l)) + l = m.WatchPathConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Username != nil { + l = m.Username.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Password != nil { + l = m.Password.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SSHKeySecret != nil { + l = m.SSHKeySecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Address != nil { + l = m.Address.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Metadata) > 0 { + for k, v := range m.Metadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.Filter != nil { + l = m.Filter.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.PollIntervalDuration) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SNSEventSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Webhook != nil { + l = m.Webhook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.TopicArn) + n += 1 + l + sovGenerated(uint64(l)) + if m.AccessKey != nil { + l = m.AccessKey.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SecretKey != nil { + l = m.SecretKey.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Region) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RoleARN) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Metadata) > 0 { + for k, v := range m.Metadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + n += 2 + if m.Filter != nil { + l = m.Filter.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Endpoint) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SQSEventSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AccessKey != nil { + l = m.AccessKey.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SecretKey != nil { + l = m.SecretKey.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Region) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Queue) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.WaitTimeSeconds)) + l = len(m.RoleARN) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.QueueAccountID) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Metadata) > 0 { + for k, v := range m.Metadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + n += 2 + if m.Filter != nil { + l = m.Filter.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Endpoint) + n += 1 + l + sovGenerated(uint64(l)) + if m.SessionToken != nil { + l = m.SessionToken.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + return n +} + +func (m *Selector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operation) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Service) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ClusterIP) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SlackEventSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SigningSecret != nil { + l = m.SigningSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Token != nil { + l = m.Token.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Webhook != nil { + l = m.Webhook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Metadata) > 0 { + for k, v := range m.Metadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.Filter != nil { + l = m.Filter.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *StorageGridEventSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Webhook != nil { + l = m.Webhook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Events) > 0 { + for _, s := range m.Events { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Filter != nil { + l = m.Filter.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.TopicArn) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Bucket) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Region) + n += 1 + l + sovGenerated(uint64(l)) + if m.AuthToken != nil { + l = m.AuthToken.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.APIURL) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Metadata) > 0 { + for k, v := range m.Metadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *StorageGridFilter) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Prefix) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Suffix) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StripeEventSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Webhook != nil { + l = m.Webhook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if m.APIKey != nil { + l = m.APIKey.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.EventFilter) > 0 { + for _, s := range m.EventFilter { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Metadata) > 0 { + for k, v := range m.Metadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *Template) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Metadata != nil { + l = m.Metadata.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.ServiceAccountName) + n += 1 + l + sovGenerated(uint64(l)) + if m.Container != nil { + l = m.Container.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + if len(m.Volumes) > 0 { + for _, e := range m.Volumes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.SecurityContext != nil { + l = m.SecurityContext.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Affinity != nil { + l = m.Affinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NodeSelector) > 0 { + for k, v := range m.NodeSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.ImagePullSecrets) > 0 { + for _, e := range m.ImagePullSecrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.PriorityClassName) + n += 1 + l + sovGenerated(uint64(l)) + if m.Priority != nil { + n += 1 + sovGenerated(uint64(*m.Priority)) + } + return n +} + +func (m *WatchPathConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Directory) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PathRegexp) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *WebhookContext) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Endpoint) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Method) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Port) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.URL) + n += 1 + l + sovGenerated(uint64(l)) + if m.ServerCertSecret != nil { + l = m.ServerCertSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ServerKeySecret != nil { + l = m.ServerKeySecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Metadata) > 0 { + for k, v := range m.Metadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.AuthSecret != nil { + l = m.AuthSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MaxPayloadSize != nil { + n += 1 + sovGenerated(uint64(*m.MaxPayloadSize)) + } + return n +} + +func (m *WebhookEventSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.WebhookContext.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Filter != nil { + l = m.Filter.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AMQPConsumeConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AMQPConsumeConfig{`, + `ConsumerTag:` + fmt.Sprintf("%v", this.ConsumerTag) + `,`, + `AutoAck:` + fmt.Sprintf("%v", this.AutoAck) + `,`, + `Exclusive:` + fmt.Sprintf("%v", this.Exclusive) + `,`, + `NoLocal:` + fmt.Sprintf("%v", this.NoLocal) + `,`, + `NoWait:` + fmt.Sprintf("%v", this.NoWait) + `,`, + `}`, + }, "") + return s +} +func (this *AMQPEventSource) String() string { + if this == nil { + return "nil" + } + keysForMetadata := make([]string, 0, len(this.Metadata)) + for k := range this.Metadata { + keysForMetadata = append(keysForMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + mapStringForMetadata := 
"map[string]string{" + for _, k := range keysForMetadata { + mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + } + mapStringForMetadata += "}" + s := strings.Join([]string{`&AMQPEventSource{`, + `URL:` + fmt.Sprintf("%v", this.URL) + `,`, + `ExchangeName:` + fmt.Sprintf("%v", this.ExchangeName) + `,`, + `ExchangeType:` + fmt.Sprintf("%v", this.ExchangeType) + `,`, + `RoutingKey:` + fmt.Sprintf("%v", this.RoutingKey) + `,`, + `ConnectionBackoff:` + strings.Replace(fmt.Sprintf("%v", this.ConnectionBackoff), "Backoff", "common.Backoff", 1) + `,`, + `JSONBody:` + fmt.Sprintf("%v", this.JSONBody) + `,`, + `TLS:` + strings.Replace(fmt.Sprintf("%v", this.TLS), "TLSConfig", "common.TLSConfig", 1) + `,`, + `Metadata:` + mapStringForMetadata + `,`, + `ExchangeDeclare:` + strings.Replace(this.ExchangeDeclare.String(), "AMQPExchangeDeclareConfig", "AMQPExchangeDeclareConfig", 1) + `,`, + `QueueDeclare:` + strings.Replace(this.QueueDeclare.String(), "AMQPQueueDeclareConfig", "AMQPQueueDeclareConfig", 1) + `,`, + `QueueBind:` + strings.Replace(this.QueueBind.String(), "AMQPQueueBindConfig", "AMQPQueueBindConfig", 1) + `,`, + `Consume:` + strings.Replace(this.Consume.String(), "AMQPConsumeConfig", "AMQPConsumeConfig", 1) + `,`, + `Auth:` + strings.Replace(fmt.Sprintf("%v", this.Auth), "BasicAuth", "common.BasicAuth", 1) + `,`, + `URLSecret:` + strings.Replace(fmt.Sprintf("%v", this.URLSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `Filter:` + strings.Replace(this.Filter.String(), "EventSourceFilter", "EventSourceFilter", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AMQPExchangeDeclareConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AMQPExchangeDeclareConfig{`, + `Durable:` + fmt.Sprintf("%v", this.Durable) + `,`, + `AutoDelete:` + fmt.Sprintf("%v", this.AutoDelete) + `,`, + `Internal:` + fmt.Sprintf("%v", this.Internal) + `,`, + `NoWait:` + fmt.Sprintf("%v", this.NoWait) + `,`, + `}`, + }, "") + return s +} +func (this *AMQPQueueBindConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AMQPQueueBindConfig{`, + `NoWait:` + fmt.Sprintf("%v", this.NoWait) + `,`, + `}`, + }, "") + return s +} +func (this *AMQPQueueDeclareConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AMQPQueueDeclareConfig{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Durable:` + fmt.Sprintf("%v", this.Durable) + `,`, + `AutoDelete:` + fmt.Sprintf("%v", this.AutoDelete) + `,`, + `Exclusive:` + fmt.Sprintf("%v", this.Exclusive) + `,`, + `NoWait:` + fmt.Sprintf("%v", this.NoWait) + `,`, + `Arguments:` + fmt.Sprintf("%v", this.Arguments) + `,`, + `}`, + }, "") + return s +} +func (this *AzureEventsHubEventSource) String() string { + if this == nil { + return "nil" + } + keysForMetadata := make([]string, 0, len(this.Metadata)) + for k := range this.Metadata { + keysForMetadata = append(keysForMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + mapStringForMetadata := "map[string]string{" + for _, k := range keysForMetadata { + mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + } + mapStringForMetadata += "}" + s := strings.Join([]string{`&AzureEventsHubEventSource{`, + `FQDN:` + fmt.Sprintf("%v", this.FQDN) + `,`, + `SharedAccessKeyName:` + strings.Replace(fmt.Sprintf("%v", this.SharedAccessKeyName), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `SharedAccessKey:` + strings.Replace(fmt.Sprintf("%v", 
this.SharedAccessKey), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `HubName:` + fmt.Sprintf("%v", this.HubName) + `,`, + `Metadata:` + mapStringForMetadata + `,`, + `Filter:` + strings.Replace(this.Filter.String(), "EventSourceFilter", "EventSourceFilter", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AzureQueueStorageEventSource) String() string { + if this == nil { + return "nil" + } + keysForMetadata := make([]string, 0, len(this.Metadata)) + for k := range this.Metadata { + keysForMetadata = append(keysForMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + mapStringForMetadata := "map[string]string{" + for _, k := range keysForMetadata { + mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + } + mapStringForMetadata += "}" + s := strings.Join([]string{`&AzureQueueStorageEventSource{`, + `StorageAccountName:` + fmt.Sprintf("%v", this.StorageAccountName) + `,`, + `ConnectionString:` + strings.Replace(fmt.Sprintf("%v", this.ConnectionString), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `QueueName:` + fmt.Sprintf("%v", this.QueueName) + `,`, + `JSONBody:` + fmt.Sprintf("%v", this.JSONBody) + `,`, + `DLQ:` + fmt.Sprintf("%v", this.DLQ) + `,`, + `Metadata:` + mapStringForMetadata + `,`, + `Filter:` + strings.Replace(this.Filter.String(), "EventSourceFilter", "EventSourceFilter", 1) + `,`, + `DecodeMessage:` + fmt.Sprintf("%v", this.DecodeMessage) + `,`, + `WaitTimeInSeconds:` + valueToStringGenerated(this.WaitTimeInSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *AzureServiceBusEventSource) String() string { + if this == nil { + return "nil" + } + keysForMetadata := make([]string, 0, len(this.Metadata)) + for k := range this.Metadata { + keysForMetadata = append(keysForMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + mapStringForMetadata := "map[string]string{" + for _, k := range keysForMetadata { + mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + } + mapStringForMetadata += "}" + s := strings.Join([]string{`&AzureServiceBusEventSource{`, + `ConnectionString:` + strings.Replace(fmt.Sprintf("%v", this.ConnectionString), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `QueueName:` + fmt.Sprintf("%v", this.QueueName) + `,`, + `TopicName:` + fmt.Sprintf("%v", this.TopicName) + `,`, + `SubscriptionName:` + fmt.Sprintf("%v", this.SubscriptionName) + `,`, + `TLS:` + strings.Replace(fmt.Sprintf("%v", this.TLS), "TLSConfig", "common.TLSConfig", 1) + `,`, + `JSONBody:` + fmt.Sprintf("%v", this.JSONBody) + `,`, + `Metadata:` + mapStringForMetadata + `,`, + `Filter:` + strings.Replace(this.Filter.String(), "EventSourceFilter", "EventSourceFilter", 1) + `,`, + `FullyQualifiedNamespace:` + fmt.Sprintf("%v", this.FullyQualifiedNamespace) + `,`, + `}`, + }, "") + return s +} +func (this *BitbucketAuth) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BitbucketAuth{`, + `Basic:` + strings.Replace(this.Basic.String(), "BitbucketBasicAuth", "BitbucketBasicAuth", 1) + `,`, + `OAuthToken:` + strings.Replace(fmt.Sprintf("%v", this.OAuthToken), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *BitbucketBasicAuth) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BitbucketBasicAuth{`, + `Username:` + strings.Replace(fmt.Sprintf("%v", this.Username), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `Password:` + 
strings.Replace(fmt.Sprintf("%v", this.Password), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *BitbucketEventSource) String() string { + if this == nil { + return "nil" + } + repeatedStringForRepositories := "[]BitbucketRepository{" + for _, f := range this.Repositories { + repeatedStringForRepositories += strings.Replace(strings.Replace(f.String(), "BitbucketRepository", "BitbucketRepository", 1), `&`, ``, 1) + "," + } + repeatedStringForRepositories += "}" + keysForMetadata := make([]string, 0, len(this.Metadata)) + for k := range this.Metadata { + keysForMetadata = append(keysForMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + mapStringForMetadata := "map[string]string{" + for _, k := range keysForMetadata { + mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + } + mapStringForMetadata += "}" + s := strings.Join([]string{`&BitbucketEventSource{`, + `DeleteHookOnFinish:` + fmt.Sprintf("%v", this.DeleteHookOnFinish) + `,`, + `Metadata:` + mapStringForMetadata + `,`, + `Webhook:` + strings.Replace(this.Webhook.String(), "WebhookContext", "WebhookContext", 1) + `,`, + `Auth:` + strings.Replace(this.Auth.String(), "BitbucketAuth", "BitbucketAuth", 1) + `,`, + `Events:` + fmt.Sprintf("%v", this.Events) + `,`, + `DeprecatedOwner:` + fmt.Sprintf("%v", this.DeprecatedOwner) + `,`, + `DeprecatedProjectKey:` + fmt.Sprintf("%v", this.DeprecatedProjectKey) + `,`, + `DeprecatedRepositorySlug:` + fmt.Sprintf("%v", this.DeprecatedRepositorySlug) + `,`, + `Repositories:` + repeatedStringForRepositories + `,`, + `Filter:` + strings.Replace(this.Filter.String(), "EventSourceFilter", "EventSourceFilter", 1) + `,`, + `}`, + }, "") + return s +} +func (this *BitbucketRepository) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BitbucketRepository{`, + `Owner:` + fmt.Sprintf("%v", this.Owner) + `,`, + `RepositorySlug:` + fmt.Sprintf("%v", this.RepositorySlug) + `,`, + `}`, + }, "") + return s +} +func (this *BitbucketServerEventSource) String() string { + if this == nil { + return "nil" + } + repeatedStringForRepositories := "[]BitbucketServerRepository{" + for _, f := range this.Repositories { + repeatedStringForRepositories += strings.Replace(strings.Replace(f.String(), "BitbucketServerRepository", "BitbucketServerRepository", 1), `&`, ``, 1) + "," + } + repeatedStringForRepositories += "}" + keysForMetadata := make([]string, 0, len(this.Metadata)) + for k := range this.Metadata { + keysForMetadata = append(keysForMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + mapStringForMetadata := "map[string]string{" + for _, k := range keysForMetadata { + mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + } + mapStringForMetadata += "}" + s := strings.Join([]string{`&BitbucketServerEventSource{`, + `Webhook:` + strings.Replace(this.Webhook.String(), "WebhookContext", "WebhookContext", 1) + `,`, + `DeprecatedProjectKey:` + fmt.Sprintf("%v", this.DeprecatedProjectKey) + `,`, + `DeprecatedRepositorySlug:` + fmt.Sprintf("%v", this.DeprecatedRepositorySlug) + `,`, + `Projects:` + fmt.Sprintf("%v", this.Projects) + `,`, + `Repositories:` + repeatedStringForRepositories + `,`, + `Events:` + fmt.Sprintf("%v", this.Events) + `,`, + `SkipBranchRefsChangedOnOpenPR:` + fmt.Sprintf("%v", this.SkipBranchRefsChangedOnOpenPR) + `,`, + `AccessToken:` + strings.Replace(fmt.Sprintf("%v", this.AccessToken), "SecretKeySelector", 
"v1.SecretKeySelector", 1) + `,`, + `WebhookSecret:` + strings.Replace(fmt.Sprintf("%v", this.WebhookSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `BitbucketServerBaseURL:` + fmt.Sprintf("%v", this.BitbucketServerBaseURL) + `,`, + `DeleteHookOnFinish:` + fmt.Sprintf("%v", this.DeleteHookOnFinish) + `,`, + `Metadata:` + mapStringForMetadata + `,`, + `Filter:` + strings.Replace(this.Filter.String(), "EventSourceFilter", "EventSourceFilter", 1) + `,`, + `TLS:` + strings.Replace(fmt.Sprintf("%v", this.TLS), "TLSConfig", "common.TLSConfig", 1) + `,`, + `CheckInterval:` + fmt.Sprintf("%v", this.CheckInterval) + `,`, + `}`, + }, "") + return s +} +func (this *BitbucketServerRepository) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BitbucketServerRepository{`, + `ProjectKey:` + fmt.Sprintf("%v", this.ProjectKey) + `,`, + `RepositorySlug:` + fmt.Sprintf("%v", this.RepositorySlug) + `,`, + `}`, + }, "") + return s +} +func (this *CalendarEventSource) String() string { + if this == nil { + return "nil" + } + keysForMetadata := make([]string, 0, len(this.Metadata)) + for k := range this.Metadata { + keysForMetadata = append(keysForMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + mapStringForMetadata := "map[string]string{" + for _, k := range keysForMetadata { + mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + } + mapStringForMetadata += "}" + s := strings.Join([]string{`&CalendarEventSource{`, + `Schedule:` + fmt.Sprintf("%v", this.Schedule) + `,`, + `Interval:` + fmt.Sprintf("%v", this.Interval) + `,`, + `ExclusionDates:` + fmt.Sprintf("%v", this.ExclusionDates) + `,`, + `Timezone:` + fmt.Sprintf("%v", this.Timezone) + `,`, + `Metadata:` + mapStringForMetadata + `,`, + `Persistence:` + strings.Replace(this.Persistence.String(), "EventPersistence", "EventPersistence", 1) + `,`, + `Filter:` + strings.Replace(this.Filter.String(), "EventSourceFilter", "EventSourceFilter", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CatchupConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CatchupConfiguration{`, + `Enabled:` + fmt.Sprintf("%v", this.Enabled) + `,`, + `MaxDuration:` + fmt.Sprintf("%v", this.MaxDuration) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapPersistence) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapPersistence{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `CreateIfNotExist:` + fmt.Sprintf("%v", this.CreateIfNotExist) + `,`, + `}`, + }, "") + return s +} +func (this *EmitterEventSource) String() string { + if this == nil { + return "nil" + } + keysForMetadata := make([]string, 0, len(this.Metadata)) + for k := range this.Metadata { + keysForMetadata = append(keysForMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + mapStringForMetadata := "map[string]string{" + for _, k := range keysForMetadata { + mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + } + mapStringForMetadata += "}" + s := strings.Join([]string{`&EmitterEventSource{`, + `Broker:` + fmt.Sprintf("%v", this.Broker) + `,`, + `ChannelKey:` + fmt.Sprintf("%v", this.ChannelKey) + `,`, + `ChannelName:` + fmt.Sprintf("%v", this.ChannelName) + `,`, + `Username:` + strings.Replace(fmt.Sprintf("%v", this.Username), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `Password:` + strings.Replace(fmt.Sprintf("%v", this.Password), "SecretKeySelector", 
"v1.SecretKeySelector", 1) + `,`, + `ConnectionBackoff:` + strings.Replace(fmt.Sprintf("%v", this.ConnectionBackoff), "Backoff", "common.Backoff", 1) + `,`, + `JSONBody:` + fmt.Sprintf("%v", this.JSONBody) + `,`, + `TLS:` + strings.Replace(fmt.Sprintf("%v", this.TLS), "TLSConfig", "common.TLSConfig", 1) + `,`, + `Metadata:` + mapStringForMetadata + `,`, + `Filter:` + strings.Replace(this.Filter.String(), "EventSourceFilter", "EventSourceFilter", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EventPersistence) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventPersistence{`, + `Catchup:` + strings.Replace(this.Catchup.String(), "CatchupConfiguration", "CatchupConfiguration", 1) + `,`, + `ConfigMap:` + strings.Replace(this.ConfigMap.String(), "ConfigMapPersistence", "ConfigMapPersistence", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EventSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventSource{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "EventSourceSpec", "EventSourceSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "EventSourceStatus", "EventSourceStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *EventSourceFilter) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventSourceFilter{`, + `Expression:` + fmt.Sprintf("%v", this.Expression) + `,`, + `}`, + }, "") + return s +} +func (this *EventSourceList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]EventSource{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "EventSource", "EventSource", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&EventSourceList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *EventSourceSpec) String() string { + if this == nil { + return "nil" + } + keysForMinio := make([]string, 0, len(this.Minio)) + for k := range this.Minio { + keysForMinio = append(keysForMinio, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMinio) + mapStringForMinio := "map[string]common.S3Artifact{" + for _, k := range keysForMinio { + mapStringForMinio += fmt.Sprintf("%v: %v,", k, this.Minio[k]) + } + mapStringForMinio += "}" + keysForCalendar := make([]string, 0, len(this.Calendar)) + for k := range this.Calendar { + keysForCalendar = append(keysForCalendar, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCalendar) + mapStringForCalendar := "map[string]CalendarEventSource{" + for _, k := range keysForCalendar { + mapStringForCalendar += fmt.Sprintf("%v: %v,", k, this.Calendar[k]) + } + mapStringForCalendar += "}" + keysForFile := make([]string, 0, len(this.File)) + for k := range this.File { + keysForFile = append(keysForFile, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForFile) + mapStringForFile := "map[string]FileEventSource{" + for _, k := range keysForFile { + mapStringForFile += fmt.Sprintf("%v: %v,", k, this.File[k]) + } + mapStringForFile += "}" + keysForResource := make([]string, 0, len(this.Resource)) + for k := range 
this.Resource { + keysForResource = append(keysForResource, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForResource) + mapStringForResource := "map[string]ResourceEventSource{" + for _, k := range keysForResource { + mapStringForResource += fmt.Sprintf("%v: %v,", k, this.Resource[k]) + } + mapStringForResource += "}" + keysForWebhook := make([]string, 0, len(this.Webhook)) + for k := range this.Webhook { + keysForWebhook = append(keysForWebhook, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForWebhook) + mapStringForWebhook := "map[string]WebhookEventSource{" + for _, k := range keysForWebhook { + mapStringForWebhook += fmt.Sprintf("%v: %v,", k, this.Webhook[k]) + } + mapStringForWebhook += "}" + keysForAMQP := make([]string, 0, len(this.AMQP)) + for k := range this.AMQP { + keysForAMQP = append(keysForAMQP, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAMQP) + mapStringForAMQP := "map[string]AMQPEventSource{" + for _, k := range keysForAMQP { + mapStringForAMQP += fmt.Sprintf("%v: %v,", k, this.AMQP[k]) + } + mapStringForAMQP += "}" + keysForKafka := make([]string, 0, len(this.Kafka)) + for k := range this.Kafka { + keysForKafka = append(keysForKafka, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForKafka) + mapStringForKafka := "map[string]KafkaEventSource{" + for _, k := range keysForKafka { + mapStringForKafka += fmt.Sprintf("%v: %v,", k, this.Kafka[k]) + } + mapStringForKafka += "}" + keysForMQTT := make([]string, 0, len(this.MQTT)) + for k := range this.MQTT { + keysForMQTT = append(keysForMQTT, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMQTT) + mapStringForMQTT := "map[string]MQTTEventSource{" + for _, k := range keysForMQTT { + mapStringForMQTT += fmt.Sprintf("%v: %v,", k, this.MQTT[k]) + } + mapStringForMQTT += "}" + keysForNATS := make([]string, 0, len(this.NATS)) + for k := range this.NATS { + keysForNATS = append(keysForNATS, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNATS) + mapStringForNATS := "map[string]NATSEventsSource{" + for _, k := range keysForNATS { + mapStringForNATS += fmt.Sprintf("%v: %v,", k, this.NATS[k]) + } + mapStringForNATS += "}" + keysForSNS := make([]string, 0, len(this.SNS)) + for k := range this.SNS { + keysForSNS = append(keysForSNS, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSNS) + mapStringForSNS := "map[string]SNSEventSource{" + for _, k := range keysForSNS { + mapStringForSNS += fmt.Sprintf("%v: %v,", k, this.SNS[k]) + } + mapStringForSNS += "}" + keysForSQS := make([]string, 0, len(this.SQS)) + for k := range this.SQS { + keysForSQS = append(keysForSQS, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSQS) + mapStringForSQS := "map[string]SQSEventSource{" + for _, k := range keysForSQS { + mapStringForSQS += fmt.Sprintf("%v: %v,", k, this.SQS[k]) + } + mapStringForSQS += "}" + keysForPubSub := make([]string, 0, len(this.PubSub)) + for k := range this.PubSub { + keysForPubSub = append(keysForPubSub, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPubSub) + mapStringForPubSub := "map[string]PubSubEventSource{" + for _, k := range keysForPubSub { + mapStringForPubSub += fmt.Sprintf("%v: %v,", k, this.PubSub[k]) + } + mapStringForPubSub += "}" + keysForGithub := make([]string, 0, len(this.Github)) + for k := range this.Github { + keysForGithub = append(keysForGithub, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForGithub) + mapStringForGithub := "map[string]GithubEventSource{" + for _, k := range keysForGithub { + mapStringForGithub += 
fmt.Sprintf("%v: %v,", k, this.Github[k]) + } + mapStringForGithub += "}" + keysForGitlab := make([]string, 0, len(this.Gitlab)) + for k := range this.Gitlab { + keysForGitlab = append(keysForGitlab, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForGitlab) + mapStringForGitlab := "map[string]GitlabEventSource{" + for _, k := range keysForGitlab { + mapStringForGitlab += fmt.Sprintf("%v: %v,", k, this.Gitlab[k]) + } + mapStringForGitlab += "}" + keysForHDFS := make([]string, 0, len(this.HDFS)) + for k := range this.HDFS { + keysForHDFS = append(keysForHDFS, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHDFS) + mapStringForHDFS := "map[string]HDFSEventSource{" + for _, k := range keysForHDFS { + mapStringForHDFS += fmt.Sprintf("%v: %v,", k, this.HDFS[k]) + } + mapStringForHDFS += "}" + keysForSlack := make([]string, 0, len(this.Slack)) + for k := range this.Slack { + keysForSlack = append(keysForSlack, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSlack) + mapStringForSlack := "map[string]SlackEventSource{" + for _, k := range keysForSlack { + mapStringForSlack += fmt.Sprintf("%v: %v,", k, this.Slack[k]) + } + mapStringForSlack += "}" + keysForStorageGrid := make([]string, 0, len(this.StorageGrid)) + for k := range this.StorageGrid { + keysForStorageGrid = append(keysForStorageGrid, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForStorageGrid) + mapStringForStorageGrid := "map[string]StorageGridEventSource{" + for _, k := range keysForStorageGrid { + mapStringForStorageGrid += fmt.Sprintf("%v: %v,", k, this.StorageGrid[k]) + } + mapStringForStorageGrid += "}" + keysForAzureEventsHub := make([]string, 0, len(this.AzureEventsHub)) + for k := range this.AzureEventsHub { + keysForAzureEventsHub = append(keysForAzureEventsHub, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAzureEventsHub) + mapStringForAzureEventsHub := "map[string]AzureEventsHubEventSource{" + for _, k := range keysForAzureEventsHub { + mapStringForAzureEventsHub += fmt.Sprintf("%v: %v,", k, this.AzureEventsHub[k]) + } + mapStringForAzureEventsHub += "}" + keysForStripe := make([]string, 0, len(this.Stripe)) + for k := range this.Stripe { + keysForStripe = append(keysForStripe, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForStripe) + mapStringForStripe := "map[string]StripeEventSource{" + for _, k := range keysForStripe { + mapStringForStripe += fmt.Sprintf("%v: %v,", k, this.Stripe[k]) + } + mapStringForStripe += "}" + keysForEmitter := make([]string, 0, len(this.Emitter)) + for k := range this.Emitter { + keysForEmitter = append(keysForEmitter, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForEmitter) + mapStringForEmitter := "map[string]EmitterEventSource{" + for _, k := range keysForEmitter { + mapStringForEmitter += fmt.Sprintf("%v: %v,", k, this.Emitter[k]) + } + mapStringForEmitter += "}" + keysForRedis := make([]string, 0, len(this.Redis)) + for k := range this.Redis { + keysForRedis = append(keysForRedis, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForRedis) + mapStringForRedis := "map[string]RedisEventSource{" + for _, k := range keysForRedis { + mapStringForRedis += fmt.Sprintf("%v: %v,", k, this.Redis[k]) + } + mapStringForRedis += "}" + keysForNSQ := make([]string, 0, len(this.NSQ)) + for k := range this.NSQ { + keysForNSQ = append(keysForNSQ, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNSQ) + mapStringForNSQ := "map[string]NSQEventSource{" + for _, k := range keysForNSQ { + mapStringForNSQ += fmt.Sprintf("%v: %v,", k, 
this.NSQ[k]) + } + mapStringForNSQ += "}" + keysForPulsar := make([]string, 0, len(this.Pulsar)) + for k := range this.Pulsar { + keysForPulsar = append(keysForPulsar, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPulsar) + mapStringForPulsar := "map[string]PulsarEventSource{" + for _, k := range keysForPulsar { + mapStringForPulsar += fmt.Sprintf("%v: %v,", k, this.Pulsar[k]) + } + mapStringForPulsar += "}" + keysForGeneric := make([]string, 0, len(this.Generic)) + for k := range this.Generic { + keysForGeneric = append(keysForGeneric, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForGeneric) + mapStringForGeneric := "map[string]GenericEventSource{" + for _, k := range keysForGeneric { + mapStringForGeneric += fmt.Sprintf("%v: %v,", k, this.Generic[k]) + } + mapStringForGeneric += "}" + keysForBitbucketServer := make([]string, 0, len(this.BitbucketServer)) + for k := range this.BitbucketServer { + keysForBitbucketServer = append(keysForBitbucketServer, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForBitbucketServer) + mapStringForBitbucketServer := "map[string]BitbucketServerEventSource{" + for _, k := range keysForBitbucketServer { + mapStringForBitbucketServer += fmt.Sprintf("%v: %v,", k, this.BitbucketServer[k]) + } + mapStringForBitbucketServer += "}" + keysForBitbucket := make([]string, 0, len(this.Bitbucket)) + for k := range this.Bitbucket { + keysForBitbucket = append(keysForBitbucket, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForBitbucket) + mapStringForBitbucket := "map[string]BitbucketEventSource{" + for _, k := range keysForBitbucket { + mapStringForBitbucket += fmt.Sprintf("%v: %v,", k, this.Bitbucket[k]) + } + mapStringForBitbucket += "}" + keysForRedisStream := make([]string, 0, len(this.RedisStream)) + for k := range this.RedisStream { + keysForRedisStream = append(keysForRedisStream, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForRedisStream) + mapStringForRedisStream := "map[string]RedisStreamEventSource{" + for _, k := range keysForRedisStream { + mapStringForRedisStream += fmt.Sprintf("%v: %v,", k, this.RedisStream[k]) + } + mapStringForRedisStream += "}" + keysForAzureServiceBus := make([]string, 0, len(this.AzureServiceBus)) + for k := range this.AzureServiceBus { + keysForAzureServiceBus = append(keysForAzureServiceBus, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAzureServiceBus) + mapStringForAzureServiceBus := "map[string]AzureServiceBusEventSource{" + for _, k := range keysForAzureServiceBus { + mapStringForAzureServiceBus += fmt.Sprintf("%v: %v,", k, this.AzureServiceBus[k]) + } + mapStringForAzureServiceBus += "}" + keysForAzureQueueStorage := make([]string, 0, len(this.AzureQueueStorage)) + for k := range this.AzureQueueStorage { + keysForAzureQueueStorage = append(keysForAzureQueueStorage, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAzureQueueStorage) + mapStringForAzureQueueStorage := "map[string]AzureQueueStorageEventSource{" + for _, k := range keysForAzureQueueStorage { + mapStringForAzureQueueStorage += fmt.Sprintf("%v: %v,", k, this.AzureQueueStorage[k]) + } + mapStringForAzureQueueStorage += "}" + keysForSFTP := make([]string, 0, len(this.SFTP)) + for k := range this.SFTP { + keysForSFTP = append(keysForSFTP, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSFTP) + mapStringForSFTP := "map[string]SFTPEventSource{" + for _, k := range keysForSFTP { + mapStringForSFTP += fmt.Sprintf("%v: %v,", k, this.SFTP[k]) + } + mapStringForSFTP += "}" + keysForGerrit := 
make([]string, 0, len(this.Gerrit)) + for k := range this.Gerrit { + keysForGerrit = append(keysForGerrit, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForGerrit) + mapStringForGerrit := "map[string]GerritEventSource{" + for _, k := range keysForGerrit { + mapStringForGerrit += fmt.Sprintf("%v: %v,", k, this.Gerrit[k]) + } + mapStringForGerrit += "}" + s := strings.Join([]string{`&EventSourceSpec{`, + `EventBusName:` + fmt.Sprintf("%v", this.EventBusName) + `,`, + `Template:` + strings.Replace(this.Template.String(), "Template", "Template", 1) + `,`, + `Service:` + strings.Replace(this.Service.String(), "Service", "Service", 1) + `,`, + `Minio:` + mapStringForMinio + `,`, + `Calendar:` + mapStringForCalendar + `,`, + `File:` + mapStringForFile + `,`, + `Resource:` + mapStringForResource + `,`, + `Webhook:` + mapStringForWebhook + `,`, + `AMQP:` + mapStringForAMQP + `,`, + `Kafka:` + mapStringForKafka + `,`, + `MQTT:` + mapStringForMQTT + `,`, + `NATS:` + mapStringForNATS + `,`, + `SNS:` + mapStringForSNS + `,`, + `SQS:` + mapStringForSQS + `,`, + `PubSub:` + mapStringForPubSub + `,`, + `Github:` + mapStringForGithub + `,`, + `Gitlab:` + mapStringForGitlab + `,`, + `HDFS:` + mapStringForHDFS + `,`, + `Slack:` + mapStringForSlack + `,`, + `StorageGrid:` + mapStringForStorageGrid + `,`, + `AzureEventsHub:` + mapStringForAzureEventsHub + `,`, + `Stripe:` + mapStringForStripe + `,`, + `Emitter:` + mapStringForEmitter + `,`, + `Redis:` + mapStringForRedis + `,`, + `NSQ:` + mapStringForNSQ + `,`, + `Pulsar:` + mapStringForPulsar + `,`, + `Generic:` + mapStringForGeneric + `,`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `BitbucketServer:` + mapStringForBitbucketServer + `,`, + `Bitbucket:` + mapStringForBitbucket + `,`, + `RedisStream:` + mapStringForRedisStream + `,`, + `AzureServiceBus:` + mapStringForAzureServiceBus + `,`, + `AzureQueueStorage:` + mapStringForAzureQueueStorage + `,`, + `SFTP:` + mapStringForSFTP + `,`, + `Gerrit:` + mapStringForGerrit + `,`, + `}`, + }, "") + return s +} +func (this *EventSourceStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventSourceStatus{`, + `Status:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "common.Status", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *FileEventSource) String() string { + if this == nil { + return "nil" + } + keysForMetadata := make([]string, 0, len(this.Metadata)) + for k := range this.Metadata { + keysForMetadata = append(keysForMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + mapStringForMetadata := "map[string]string{" + for _, k := range keysForMetadata { + mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + } + mapStringForMetadata += "}" + s := strings.Join([]string{`&FileEventSource{`, + `EventType:` + fmt.Sprintf("%v", this.EventType) + `,`, + `WatchPathConfig:` + strings.Replace(strings.Replace(this.WatchPathConfig.String(), "WatchPathConfig", "WatchPathConfig", 1), `&`, ``, 1) + `,`, + `Polling:` + fmt.Sprintf("%v", this.Polling) + `,`, + `Metadata:` + mapStringForMetadata + `,`, + `Filter:` + strings.Replace(this.Filter.String(), "EventSourceFilter", "EventSourceFilter", 1) + `,`, + `}`, + }, "") + return s +} +func (this *GenericEventSource) String() string { + if this == nil { + return "nil" + } + keysForMetadata := make([]string, 0, len(this.Metadata)) + for k := range this.Metadata { + keysForMetadata = append(keysForMetadata, k) + } + 
github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + mapStringForMetadata := "map[string]string{" + for _, k := range keysForMetadata { + mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + } + mapStringForMetadata += "}" + s := strings.Join([]string{`&GenericEventSource{`, + `URL:` + fmt.Sprintf("%v", this.URL) + `,`, + `Config:` + fmt.Sprintf("%v", this.Config) + `,`, + `Insecure:` + fmt.Sprintf("%v", this.Insecure) + `,`, + `JSONBody:` + fmt.Sprintf("%v", this.JSONBody) + `,`, + `Metadata:` + mapStringForMetadata + `,`, + `AuthSecret:` + strings.Replace(fmt.Sprintf("%v", this.AuthSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `Filter:` + strings.Replace(this.Filter.String(), "EventSourceFilter", "EventSourceFilter", 1) + `,`, + `}`, + }, "") + return s +} +func (this *GerritEventSource) String() string { + if this == nil { + return "nil" + } + keysForMetadata := make([]string, 0, len(this.Metadata)) + for k := range this.Metadata { + keysForMetadata = append(keysForMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + mapStringForMetadata := "map[string]string{" + for _, k := range keysForMetadata { + mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + } + mapStringForMetadata += "}" + s := strings.Join([]string{`&GerritEventSource{`, + `Webhook:` + strings.Replace(this.Webhook.String(), "WebhookContext", "WebhookContext", 1) + `,`, + `HookName:` + fmt.Sprintf("%v", this.HookName) + `,`, + `Events:` + fmt.Sprintf("%v", this.Events) + `,`, + `Auth:` + strings.Replace(fmt.Sprintf("%v", this.Auth), "BasicAuth", "common.BasicAuth", 1) + `,`, + `GerritBaseURL:` + fmt.Sprintf("%v", this.GerritBaseURL) + `,`, + `DeleteHookOnFinish:` + fmt.Sprintf("%v", this.DeleteHookOnFinish) + `,`, + `Metadata:` + mapStringForMetadata + `,`, + `Projects:` + fmt.Sprintf("%v", this.Projects) + `,`, + `SslVerify:` + fmt.Sprintf("%v", this.SslVerify) + `,`, + `Filter:` + strings.Replace(this.Filter.String(), "EventSourceFilter", "EventSourceFilter", 1) + `,`, + `}`, + }, "") + return s +} +func (this *GithubAppCreds) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GithubAppCreds{`, + `PrivateKey:` + strings.Replace(fmt.Sprintf("%v", this.PrivateKey), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `AppID:` + fmt.Sprintf("%v", this.AppID) + `,`, + `InstallationID:` + fmt.Sprintf("%v", this.InstallationID) + `,`, + `}`, + }, "") + return s +} +func (this *GithubEventSource) String() string { + if this == nil { + return "nil" + } + repeatedStringForRepositories := "[]OwnedRepositories{" + for _, f := range this.Repositories { + repeatedStringForRepositories += strings.Replace(strings.Replace(f.String(), "OwnedRepositories", "OwnedRepositories", 1), `&`, ``, 1) + "," + } + repeatedStringForRepositories += "}" + keysForMetadata := make([]string, 0, len(this.Metadata)) + for k := range this.Metadata { + keysForMetadata = append(keysForMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + mapStringForMetadata := "map[string]string{" + for _, k := range keysForMetadata { + mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + } + mapStringForMetadata += "}" + s := strings.Join([]string{`&GithubEventSource{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Webhook:` + strings.Replace(this.Webhook.String(), "WebhookContext", "WebhookContext", 1) + `,`, + `DeprecatedOwner:` + fmt.Sprintf("%v", this.DeprecatedOwner) + `,`, + `DeprecatedRepository:` 
+ fmt.Sprintf("%v", this.DeprecatedRepository) + `,`, + `Events:` + fmt.Sprintf("%v", this.Events) + `,`, + `APIToken:` + strings.Replace(fmt.Sprintf("%v", this.APIToken), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `WebhookSecret:` + strings.Replace(fmt.Sprintf("%v", this.WebhookSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `Insecure:` + fmt.Sprintf("%v", this.Insecure) + `,`, + `Active:` + fmt.Sprintf("%v", this.Active) + `,`, + `ContentType:` + fmt.Sprintf("%v", this.ContentType) + `,`, + `GithubBaseURL:` + fmt.Sprintf("%v", this.GithubBaseURL) + `,`, + `GithubUploadURL:` + fmt.Sprintf("%v", this.GithubUploadURL) + `,`, + `DeleteHookOnFinish:` + fmt.Sprintf("%v", this.DeleteHookOnFinish) + `,`, + `Metadata:` + mapStringForMetadata + `,`, + `Repositories:` + repeatedStringForRepositories + `,`, + `Organizations:` + fmt.Sprintf("%v", this.Organizations) + `,`, + `GithubApp:` + strings.Replace(this.GithubApp.String(), "GithubAppCreds", "GithubAppCreds", 1) + `,`, + `Filter:` + strings.Replace(this.Filter.String(), "EventSourceFilter", "EventSourceFilter", 1) + `,`, + `PayloadEnrichment:` + strings.Replace(strings.Replace(this.PayloadEnrichment.String(), "PayloadEnrichmentFlags", "PayloadEnrichmentFlags", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *GitlabEventSource) String() string { + if this == nil { + return "nil" + } + keysForMetadata := make([]string, 0, len(this.Metadata)) + for k := range this.Metadata { + keysForMetadata = append(keysForMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + mapStringForMetadata := "map[string]string{" + for _, k := range keysForMetadata { + mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + } + mapStringForMetadata += "}" + s := strings.Join([]string{`&GitlabEventSource{`, + `Webhook:` + strings.Replace(this.Webhook.String(), "WebhookContext", "WebhookContext", 1) + `,`, + `DeprecatedProjectID:` + fmt.Sprintf("%v", this.DeprecatedProjectID) + `,`, + `Events:` + fmt.Sprintf("%v", this.Events) + `,`, + `AccessToken:` + strings.Replace(fmt.Sprintf("%v", this.AccessToken), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `EnableSSLVerification:` + fmt.Sprintf("%v", this.EnableSSLVerification) + `,`, + `GitlabBaseURL:` + fmt.Sprintf("%v", this.GitlabBaseURL) + `,`, + `DeleteHookOnFinish:` + fmt.Sprintf("%v", this.DeleteHookOnFinish) + `,`, + `Metadata:` + mapStringForMetadata + `,`, + `Projects:` + fmt.Sprintf("%v", this.Projects) + `,`, + `SecretToken:` + strings.Replace(fmt.Sprintf("%v", this.SecretToken), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `Filter:` + strings.Replace(this.Filter.String(), "EventSourceFilter", "EventSourceFilter", 1) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `}`, + }, "") + return s +} +func (this *HDFSEventSource) String() string { + if this == nil { + return "nil" + } + keysForMetadata := make([]string, 0, len(this.Metadata)) + for k := range this.Metadata { + keysForMetadata = append(keysForMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + mapStringForMetadata := "map[string]string{" + for _, k := range keysForMetadata { + mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + } + mapStringForMetadata += "}" + s := strings.Join([]string{`&HDFSEventSource{`, + `WatchPathConfig:` + strings.Replace(strings.Replace(this.WatchPathConfig.String(), "WatchPathConfig", "WatchPathConfig", 1), `&`, ``, 1) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) 
+ `,`, + `CheckInterval:` + fmt.Sprintf("%v", this.CheckInterval) + `,`, + `Addresses:` + fmt.Sprintf("%v", this.Addresses) + `,`, + `HDFSUser:` + fmt.Sprintf("%v", this.HDFSUser) + `,`, + `KrbCCacheSecret:` + strings.Replace(fmt.Sprintf("%v", this.KrbCCacheSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `KrbKeytabSecret:` + strings.Replace(fmt.Sprintf("%v", this.KrbKeytabSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `KrbUsername:` + fmt.Sprintf("%v", this.KrbUsername) + `,`, + `KrbRealm:` + fmt.Sprintf("%v", this.KrbRealm) + `,`, + `KrbConfigConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.KrbConfigConfigMap), "ConfigMapKeySelector", "v1.ConfigMapKeySelector", 1) + `,`, + `KrbServicePrincipalName:` + fmt.Sprintf("%v", this.KrbServicePrincipalName) + `,`, + `Metadata:` + mapStringForMetadata + `,`, + `Filter:` + strings.Replace(this.Filter.String(), "EventSourceFilter", "EventSourceFilter", 1) + `,`, + `}`, + }, "") + return s +} +func (this *KafkaConsumerGroup) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KafkaConsumerGroup{`, + `GroupName:` + fmt.Sprintf("%v", this.GroupName) + `,`, + `Oldest:` + fmt.Sprintf("%v", this.Oldest) + `,`, + `RebalanceStrategy:` + fmt.Sprintf("%v", this.RebalanceStrategy) + `,`, + `}`, + }, "") + return s +} +func (this *KafkaEventSource) String() string { + if this == nil { + return "nil" + } + keysForMetadata := make([]string, 0, len(this.Metadata)) + for k := range this.Metadata { + keysForMetadata = append(keysForMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + mapStringForMetadata := "map[string]string{" + for _, k := range keysForMetadata { + mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + } + mapStringForMetadata += "}" + s := strings.Join([]string{`&KafkaEventSource{`, + `URL:` + fmt.Sprintf("%v", this.URL) + `,`, + `Partition:` + fmt.Sprintf("%v", this.Partition) + `,`, + `Topic:` + fmt.Sprintf("%v", this.Topic) + `,`, + `ConnectionBackoff:` + strings.Replace(fmt.Sprintf("%v", this.ConnectionBackoff), "Backoff", "common.Backoff", 1) + `,`, + `TLS:` + strings.Replace(fmt.Sprintf("%v", this.TLS), "TLSConfig", "common.TLSConfig", 1) + `,`, + `JSONBody:` + fmt.Sprintf("%v", this.JSONBody) + `,`, + `Metadata:` + mapStringForMetadata + `,`, + `ConsumerGroup:` + strings.Replace(this.ConsumerGroup.String(), "KafkaConsumerGroup", "KafkaConsumerGroup", 1) + `,`, + `LimitEventsPerSecond:` + fmt.Sprintf("%v", this.LimitEventsPerSecond) + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `SASL:` + strings.Replace(fmt.Sprintf("%v", this.SASL), "SASLConfig", "common.SASLConfig", 1) + `,`, + `Filter:` + strings.Replace(this.Filter.String(), "EventSourceFilter", "EventSourceFilter", 1) + `,`, + `Config:` + fmt.Sprintf("%v", this.Config) + `,`, + `}`, + }, "") + return s +} +func (this *MQTTEventSource) String() string { + if this == nil { + return "nil" + } + keysForMetadata := make([]string, 0, len(this.Metadata)) + for k := range this.Metadata { + keysForMetadata = append(keysForMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + mapStringForMetadata := "map[string]string{" + for _, k := range keysForMetadata { + mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + } + mapStringForMetadata += "}" + s := strings.Join([]string{`&MQTTEventSource{`, + `URL:` + fmt.Sprintf("%v", this.URL) + `,`, + `Topic:` + fmt.Sprintf("%v", this.Topic) + `,`, + `ClientID:` + fmt.Sprintf("%v", 
this.ClientID) + `,`, + `ConnectionBackoff:` + strings.Replace(fmt.Sprintf("%v", this.ConnectionBackoff), "Backoff", "common.Backoff", 1) + `,`, + `JSONBody:` + fmt.Sprintf("%v", this.JSONBody) + `,`, + `TLS:` + strings.Replace(fmt.Sprintf("%v", this.TLS), "TLSConfig", "common.TLSConfig", 1) + `,`, + `Metadata:` + mapStringForMetadata + `,`, + `Filter:` + strings.Replace(this.Filter.String(), "EventSourceFilter", "EventSourceFilter", 1) + `,`, + `Auth:` + strings.Replace(fmt.Sprintf("%v", this.Auth), "BasicAuth", "common.BasicAuth", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NATSAuth) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NATSAuth{`, + `Basic:` + strings.Replace(fmt.Sprintf("%v", this.Basic), "BasicAuth", "common.BasicAuth", 1) + `,`, + `Token:` + strings.Replace(fmt.Sprintf("%v", this.Token), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `NKey:` + strings.Replace(fmt.Sprintf("%v", this.NKey), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `Credential:` + strings.Replace(fmt.Sprintf("%v", this.Credential), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NATSEventsSource) String() string { + if this == nil { + return "nil" + } + keysForMetadata := make([]string, 0, len(this.Metadata)) + for k := range this.Metadata { + keysForMetadata = append(keysForMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + mapStringForMetadata := "map[string]string{" + for _, k := range keysForMetadata { + mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + } + mapStringForMetadata += "}" + s := strings.Join([]string{`&NATSEventsSource{`, + `URL:` + fmt.Sprintf("%v", this.URL) + `,`, + `Subject:` + fmt.Sprintf("%v", this.Subject) + `,`, + `ConnectionBackoff:` + strings.Replace(fmt.Sprintf("%v", this.ConnectionBackoff), "Backoff", "common.Backoff", 1) + `,`, + `JSONBody:` + fmt.Sprintf("%v", this.JSONBody) + `,`, + `TLS:` + strings.Replace(fmt.Sprintf("%v", this.TLS), "TLSConfig", "common.TLSConfig", 1) + `,`, + `Metadata:` + mapStringForMetadata + `,`, + `Auth:` + strings.Replace(this.Auth.String(), "NATSAuth", "NATSAuth", 1) + `,`, + `Filter:` + strings.Replace(this.Filter.String(), "EventSourceFilter", "EventSourceFilter", 1) + `,`, + `Queue:` + valueToStringGenerated(this.Queue) + `,`, + `}`, + }, "") + return s +} +func (this *NSQEventSource) String() string { + if this == nil { + return "nil" + } + keysForMetadata := make([]string, 0, len(this.Metadata)) + for k := range this.Metadata { + keysForMetadata = append(keysForMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + mapStringForMetadata := "map[string]string{" + for _, k := range keysForMetadata { + mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + } + mapStringForMetadata += "}" + s := strings.Join([]string{`&NSQEventSource{`, + `HostAddress:` + fmt.Sprintf("%v", this.HostAddress) + `,`, + `Topic:` + fmt.Sprintf("%v", this.Topic) + `,`, + `Channel:` + fmt.Sprintf("%v", this.Channel) + `,`, + `ConnectionBackoff:` + strings.Replace(fmt.Sprintf("%v", this.ConnectionBackoff), "Backoff", "common.Backoff", 1) + `,`, + `JSONBody:` + fmt.Sprintf("%v", this.JSONBody) + `,`, + `TLS:` + strings.Replace(fmt.Sprintf("%v", this.TLS), "TLSConfig", "common.TLSConfig", 1) + `,`, + `Metadata:` + mapStringForMetadata + `,`, + `Filter:` + strings.Replace(this.Filter.String(), "EventSourceFilter", "EventSourceFilter", 1) + `,`, + `}`, + }, "") + return 
s +} +func (this *OwnedRepositories) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OwnedRepositories{`, + `Owner:` + fmt.Sprintf("%v", this.Owner) + `,`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `}`, + }, "") + return s +} +func (this *PayloadEnrichmentFlags) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PayloadEnrichmentFlags{`, + `FetchPROnPRCommentAdded:` + fmt.Sprintf("%v", this.FetchPROnPRCommentAdded) + `,`, + `}`, + }, "") + return s +} +func (this *PubSubEventSource) String() string { + if this == nil { + return "nil" + } + keysForMetadata := make([]string, 0, len(this.Metadata)) + for k := range this.Metadata { + keysForMetadata = append(keysForMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + mapStringForMetadata := "map[string]string{" + for _, k := range keysForMetadata { + mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + } + mapStringForMetadata += "}" + s := strings.Join([]string{`&PubSubEventSource{`, + `ProjectID:` + fmt.Sprintf("%v", this.ProjectID) + `,`, + `TopicProjectID:` + fmt.Sprintf("%v", this.TopicProjectID) + `,`, + `Topic:` + fmt.Sprintf("%v", this.Topic) + `,`, + `SubscriptionID:` + fmt.Sprintf("%v", this.SubscriptionID) + `,`, + `CredentialSecret:` + strings.Replace(fmt.Sprintf("%v", this.CredentialSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `DeleteSubscriptionOnFinish:` + fmt.Sprintf("%v", this.DeleteSubscriptionOnFinish) + `,`, + `JSONBody:` + fmt.Sprintf("%v", this.JSONBody) + `,`, + `Metadata:` + mapStringForMetadata + `,`, + `Filter:` + strings.Replace(this.Filter.String(), "EventSourceFilter", "EventSourceFilter", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PulsarEventSource) String() string { + if this == nil { + return "nil" + } + keysForMetadata := make([]string, 0, len(this.Metadata)) + for k := range this.Metadata { + keysForMetadata = append(keysForMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + mapStringForMetadata := "map[string]string{" + for _, k := range keysForMetadata { + mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + } + mapStringForMetadata += "}" + keysForAuthAthenzParams := make([]string, 0, len(this.AuthAthenzParams)) + for k := range this.AuthAthenzParams { + keysForAuthAthenzParams = append(keysForAuthAthenzParams, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAuthAthenzParams) + mapStringForAuthAthenzParams := "map[string]string{" + for _, k := range keysForAuthAthenzParams { + mapStringForAuthAthenzParams += fmt.Sprintf("%v: %v,", k, this.AuthAthenzParams[k]) + } + mapStringForAuthAthenzParams += "}" + s := strings.Join([]string{`&PulsarEventSource{`, + `Topics:` + fmt.Sprintf("%v", this.Topics) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `URL:` + fmt.Sprintf("%v", this.URL) + `,`, + `TLSTrustCertsSecret:` + strings.Replace(fmt.Sprintf("%v", this.TLSTrustCertsSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `TLSAllowInsecureConnection:` + fmt.Sprintf("%v", this.TLSAllowInsecureConnection) + `,`, + `TLSValidateHostname:` + fmt.Sprintf("%v", this.TLSValidateHostname) + `,`, + `TLS:` + strings.Replace(fmt.Sprintf("%v", this.TLS), "TLSConfig", "common.TLSConfig", 1) + `,`, + `ConnectionBackoff:` + strings.Replace(fmt.Sprintf("%v", this.ConnectionBackoff), "Backoff", "common.Backoff", 1) + `,`, + `JSONBody:` + fmt.Sprintf("%v", this.JSONBody) + `,`, + `Metadata:` + 
mapStringForMetadata + `,`, + `AuthTokenSecret:` + strings.Replace(fmt.Sprintf("%v", this.AuthTokenSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `Filter:` + strings.Replace(this.Filter.String(), "EventSourceFilter", "EventSourceFilter", 1) + `,`, + `AuthAthenzParams:` + mapStringForAuthAthenzParams + `,`, + `AuthAthenzSecret:` + strings.Replace(fmt.Sprintf("%v", this.AuthAthenzSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RedisEventSource) String() string { + if this == nil { + return "nil" + } + keysForMetadata := make([]string, 0, len(this.Metadata)) + for k := range this.Metadata { + keysForMetadata = append(keysForMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + mapStringForMetadata := "map[string]string{" + for _, k := range keysForMetadata { + mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + } + mapStringForMetadata += "}" + s := strings.Join([]string{`&RedisEventSource{`, + `HostAddress:` + fmt.Sprintf("%v", this.HostAddress) + `,`, + `Password:` + strings.Replace(fmt.Sprintf("%v", this.Password), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `DB:` + fmt.Sprintf("%v", this.DB) + `,`, + `Channels:` + fmt.Sprintf("%v", this.Channels) + `,`, + `TLS:` + strings.Replace(fmt.Sprintf("%v", this.TLS), "TLSConfig", "common.TLSConfig", 1) + `,`, + `Metadata:` + mapStringForMetadata + `,`, + `Filter:` + strings.Replace(this.Filter.String(), "EventSourceFilter", "EventSourceFilter", 1) + `,`, + `JSONBody:` + fmt.Sprintf("%v", this.JSONBody) + `,`, + `Username:` + fmt.Sprintf("%v", this.Username) + `,`, + `}`, + }, "") + return s +} +func (this *RedisStreamEventSource) String() string { + if this == nil { + return "nil" + } + keysForMetadata := make([]string, 0, len(this.Metadata)) + for k := range this.Metadata { + keysForMetadata = append(keysForMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + mapStringForMetadata := "map[string]string{" + for _, k := range keysForMetadata { + mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + } + mapStringForMetadata += "}" + s := strings.Join([]string{`&RedisStreamEventSource{`, + `HostAddress:` + fmt.Sprintf("%v", this.HostAddress) + `,`, + `Password:` + strings.Replace(fmt.Sprintf("%v", this.Password), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `DB:` + fmt.Sprintf("%v", this.DB) + `,`, + `Streams:` + fmt.Sprintf("%v", this.Streams) + `,`, + `MaxMsgCountPerRead:` + fmt.Sprintf("%v", this.MaxMsgCountPerRead) + `,`, + `ConsumerGroup:` + fmt.Sprintf("%v", this.ConsumerGroup) + `,`, + `TLS:` + strings.Replace(fmt.Sprintf("%v", this.TLS), "TLSConfig", "common.TLSConfig", 1) + `,`, + `Metadata:` + mapStringForMetadata + `,`, + `Filter:` + strings.Replace(this.Filter.String(), "EventSourceFilter", "EventSourceFilter", 1) + `,`, + `Username:` + fmt.Sprintf("%v", this.Username) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceEventSource) String() string { + if this == nil { + return "nil" + } + keysForMetadata := make([]string, 0, len(this.Metadata)) + for k := range this.Metadata { + keysForMetadata = append(keysForMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + mapStringForMetadata := "map[string]string{" + for _, k := range keysForMetadata { + mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + } + mapStringForMetadata += "}" + s := 
strings.Join([]string{`&ResourceEventSource{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Filter:` + strings.Replace(this.Filter.String(), "ResourceFilter", "ResourceFilter", 1) + `,`, + `GroupVersionResource:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.GroupVersionResource), "GroupVersionResource", "v11.GroupVersionResource", 1), `&`, ``, 1) + `,`, + `EventTypes:` + fmt.Sprintf("%v", this.EventTypes) + `,`, + `Metadata:` + mapStringForMetadata + `,`, + `Cluster:` + fmt.Sprintf("%v", this.Cluster) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceFilter) String() string { + if this == nil { + return "nil" + } + repeatedStringForLabels := "[]Selector{" + for _, f := range this.Labels { + repeatedStringForLabels += strings.Replace(strings.Replace(f.String(), "Selector", "Selector", 1), `&`, ``, 1) + "," + } + repeatedStringForLabels += "}" + repeatedStringForFields := "[]Selector{" + for _, f := range this.Fields { + repeatedStringForFields += strings.Replace(strings.Replace(f.String(), "Selector", "Selector", 1), `&`, ``, 1) + "," + } + repeatedStringForFields += "}" + s := strings.Join([]string{`&ResourceFilter{`, + `Prefix:` + fmt.Sprintf("%v", this.Prefix) + `,`, + `Labels:` + repeatedStringForLabels + `,`, + `Fields:` + repeatedStringForFields + `,`, + `CreatedBy:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CreatedBy), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, + `AfterStart:` + fmt.Sprintf("%v", this.AfterStart) + `,`, + `}`, + }, "") + return s +} +func (this *SFTPEventSource) String() string { + if this == nil { + return "nil" + } + keysForMetadata := make([]string, 0, len(this.Metadata)) + for k := range this.Metadata { + keysForMetadata = append(keysForMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + mapStringForMetadata := "map[string]string{" + for _, k := range keysForMetadata { + mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + } + mapStringForMetadata += "}" + s := strings.Join([]string{`&SFTPEventSource{`, + `EventType:` + fmt.Sprintf("%v", this.EventType) + `,`, + `WatchPathConfig:` + strings.Replace(strings.Replace(this.WatchPathConfig.String(), "WatchPathConfig", "WatchPathConfig", 1), `&`, ``, 1) + `,`, + `Username:` + strings.Replace(fmt.Sprintf("%v", this.Username), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `Password:` + strings.Replace(fmt.Sprintf("%v", this.Password), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `SSHKeySecret:` + strings.Replace(fmt.Sprintf("%v", this.SSHKeySecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `Address:` + strings.Replace(fmt.Sprintf("%v", this.Address), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `Metadata:` + mapStringForMetadata + `,`, + `Filter:` + strings.Replace(this.Filter.String(), "EventSourceFilter", "EventSourceFilter", 1) + `,`, + `PollIntervalDuration:` + fmt.Sprintf("%v", this.PollIntervalDuration) + `,`, + `}`, + }, "") + return s +} +func (this *SNSEventSource) String() string { + if this == nil { + return "nil" + } + keysForMetadata := make([]string, 0, len(this.Metadata)) + for k := range this.Metadata { + keysForMetadata = append(keysForMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + mapStringForMetadata := "map[string]string{" + for _, k := range keysForMetadata { + mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + } + mapStringForMetadata += "}" + s := strings.Join([]string{`&SNSEventSource{`, + `Webhook:` + 
strings.Replace(this.Webhook.String(), "WebhookContext", "WebhookContext", 1) + `,`, + `TopicArn:` + fmt.Sprintf("%v", this.TopicArn) + `,`, + `AccessKey:` + strings.Replace(fmt.Sprintf("%v", this.AccessKey), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `SecretKey:` + strings.Replace(fmt.Sprintf("%v", this.SecretKey), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `Region:` + fmt.Sprintf("%v", this.Region) + `,`, + `RoleARN:` + fmt.Sprintf("%v", this.RoleARN) + `,`, + `Metadata:` + mapStringForMetadata + `,`, + `ValidateSignature:` + fmt.Sprintf("%v", this.ValidateSignature) + `,`, + `Filter:` + strings.Replace(this.Filter.String(), "EventSourceFilter", "EventSourceFilter", 1) + `,`, + `Endpoint:` + fmt.Sprintf("%v", this.Endpoint) + `,`, + `}`, + }, "") + return s +} +func (this *SQSEventSource) String() string { + if this == nil { + return "nil" + } + keysForMetadata := make([]string, 0, len(this.Metadata)) + for k := range this.Metadata { + keysForMetadata = append(keysForMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + mapStringForMetadata := "map[string]string{" + for _, k := range keysForMetadata { + mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + } + mapStringForMetadata += "}" + s := strings.Join([]string{`&SQSEventSource{`, + `AccessKey:` + strings.Replace(fmt.Sprintf("%v", this.AccessKey), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `SecretKey:` + strings.Replace(fmt.Sprintf("%v", this.SecretKey), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `Region:` + fmt.Sprintf("%v", this.Region) + `,`, + `Queue:` + fmt.Sprintf("%v", this.Queue) + `,`, + `WaitTimeSeconds:` + fmt.Sprintf("%v", this.WaitTimeSeconds) + `,`, + `RoleARN:` + fmt.Sprintf("%v", this.RoleARN) + `,`, + `JSONBody:` + fmt.Sprintf("%v", this.JSONBody) + `,`, + `QueueAccountID:` + fmt.Sprintf("%v", this.QueueAccountID) + `,`, + `Metadata:` + mapStringForMetadata + `,`, + `DLQ:` + fmt.Sprintf("%v", this.DLQ) + `,`, + `Filter:` + strings.Replace(this.Filter.String(), "EventSourceFilter", "EventSourceFilter", 1) + `,`, + `Endpoint:` + fmt.Sprintf("%v", this.Endpoint) + `,`, + `SessionToken:` + strings.Replace(fmt.Sprintf("%v", this.SessionToken), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Selector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Selector{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Operation:` + fmt.Sprintf("%v", this.Operation) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *Service) String() string { + if this == nil { + return "nil" + } + repeatedStringForPorts := "[]ServicePort{" + for _, f := range this.Ports { + repeatedStringForPorts += fmt.Sprintf("%v", f) + "," + } + repeatedStringForPorts += "}" + s := strings.Join([]string{`&Service{`, + `Ports:` + repeatedStringForPorts + `,`, + `ClusterIP:` + fmt.Sprintf("%v", this.ClusterIP) + `,`, + `}`, + }, "") + return s +} +func (this *SlackEventSource) String() string { + if this == nil { + return "nil" + } + keysForMetadata := make([]string, 0, len(this.Metadata)) + for k := range this.Metadata { + keysForMetadata = append(keysForMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + mapStringForMetadata := "map[string]string{" + for _, k := range keysForMetadata { + mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + } + mapStringForMetadata += "}" + s := 
strings.Join([]string{`&SlackEventSource{`, + `SigningSecret:` + strings.Replace(fmt.Sprintf("%v", this.SigningSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `Token:` + strings.Replace(fmt.Sprintf("%v", this.Token), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `Webhook:` + strings.Replace(this.Webhook.String(), "WebhookContext", "WebhookContext", 1) + `,`, + `Metadata:` + mapStringForMetadata + `,`, + `Filter:` + strings.Replace(this.Filter.String(), "EventSourceFilter", "EventSourceFilter", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StorageGridEventSource) String() string { + if this == nil { + return "nil" + } + keysForMetadata := make([]string, 0, len(this.Metadata)) + for k := range this.Metadata { + keysForMetadata = append(keysForMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + mapStringForMetadata := "map[string]string{" + for _, k := range keysForMetadata { + mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + } + mapStringForMetadata += "}" + s := strings.Join([]string{`&StorageGridEventSource{`, + `Webhook:` + strings.Replace(this.Webhook.String(), "WebhookContext", "WebhookContext", 1) + `,`, + `Events:` + fmt.Sprintf("%v", this.Events) + `,`, + `Filter:` + strings.Replace(this.Filter.String(), "StorageGridFilter", "StorageGridFilter", 1) + `,`, + `TopicArn:` + fmt.Sprintf("%v", this.TopicArn) + `,`, + `Bucket:` + fmt.Sprintf("%v", this.Bucket) + `,`, + `Region:` + fmt.Sprintf("%v", this.Region) + `,`, + `AuthToken:` + strings.Replace(fmt.Sprintf("%v", this.AuthToken), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `APIURL:` + fmt.Sprintf("%v", this.APIURL) + `,`, + `Metadata:` + mapStringForMetadata + `,`, + `}`, + }, "") + return s +} +func (this *StorageGridFilter) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageGridFilter{`, + `Prefix:` + fmt.Sprintf("%v", this.Prefix) + `,`, + `Suffix:` + fmt.Sprintf("%v", this.Suffix) + `,`, + `}`, + }, "") + return s +} +func (this *StripeEventSource) String() string { + if this == nil { + return "nil" + } + keysForMetadata := make([]string, 0, len(this.Metadata)) + for k := range this.Metadata { + keysForMetadata = append(keysForMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + mapStringForMetadata := "map[string]string{" + for _, k := range keysForMetadata { + mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + } + mapStringForMetadata += "}" + s := strings.Join([]string{`&StripeEventSource{`, + `Webhook:` + strings.Replace(this.Webhook.String(), "WebhookContext", "WebhookContext", 1) + `,`, + `CreateWebhook:` + fmt.Sprintf("%v", this.CreateWebhook) + `,`, + `APIKey:` + strings.Replace(fmt.Sprintf("%v", this.APIKey), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `EventFilter:` + fmt.Sprintf("%v", this.EventFilter) + `,`, + `Metadata:` + mapStringForMetadata + `,`, + `}`, + }, "") + return s +} +func (this *Template) String() string { + if this == nil { + return "nil" + } + repeatedStringForVolumes := "[]Volume{" + for _, f := range this.Volumes { + repeatedStringForVolumes += fmt.Sprintf("%v", f) + "," + } + repeatedStringForVolumes += "}" + repeatedStringForTolerations := "[]Toleration{" + for _, f := range this.Tolerations { + repeatedStringForTolerations += fmt.Sprintf("%v", f) + "," + } + repeatedStringForTolerations += "}" + repeatedStringForImagePullSecrets := "[]LocalObjectReference{" + for _, f := range this.ImagePullSecrets { 
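+ // Render each LocalObjectReference with %v in slice order; map-typed fields below sort their keys first so String() output is deterministic.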
+ repeatedStringForImagePullSecrets += fmt.Sprintf("%v", f) + "," + } + repeatedStringForImagePullSecrets += "}" + keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) + for k := range this.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + mapStringForNodeSelector := "map[string]string{" + for _, k := range keysForNodeSelector { + mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + } + mapStringForNodeSelector += "}" + s := strings.Join([]string{`&Template{`, + `Metadata:` + strings.Replace(fmt.Sprintf("%v", this.Metadata), "Metadata", "common.Metadata", 1) + `,`, + `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`, + `Container:` + strings.Replace(fmt.Sprintf("%v", this.Container), "Container", "v1.Container", 1) + `,`, + `Volumes:` + repeatedStringForVolumes + `,`, + `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "PodSecurityContext", "v1.PodSecurityContext", 1) + `,`, + `Affinity:` + strings.Replace(fmt.Sprintf("%v", this.Affinity), "Affinity", "v1.Affinity", 1) + `,`, + `Tolerations:` + repeatedStringForTolerations + `,`, + `NodeSelector:` + mapStringForNodeSelector + `,`, + `ImagePullSecrets:` + repeatedStringForImagePullSecrets + `,`, + `PriorityClassName:` + fmt.Sprintf("%v", this.PriorityClassName) + `,`, + `Priority:` + valueToStringGenerated(this.Priority) + `,`, + `}`, + }, "") + return s +} +func (this *WatchPathConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WatchPathConfig{`, + `Directory:` + fmt.Sprintf("%v", this.Directory) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `PathRegexp:` + fmt.Sprintf("%v", this.PathRegexp) + `,`, + `}`, + }, "") + return s +} +func (this *WebhookContext) String() string { + if this == nil { + return "nil" + } + keysForMetadata := make([]string, 0, len(this.Metadata)) + for k := range this.Metadata { + keysForMetadata = append(keysForMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + mapStringForMetadata := "map[string]string{" + for _, k := range keysForMetadata { + mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + } + mapStringForMetadata += "}" + s := strings.Join([]string{`&WebhookContext{`, + `Endpoint:` + fmt.Sprintf("%v", this.Endpoint) + `,`, + `Method:` + fmt.Sprintf("%v", this.Method) + `,`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `URL:` + fmt.Sprintf("%v", this.URL) + `,`, + `ServerCertSecret:` + strings.Replace(fmt.Sprintf("%v", this.ServerCertSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `ServerKeySecret:` + strings.Replace(fmt.Sprintf("%v", this.ServerKeySecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `Metadata:` + mapStringForMetadata + `,`, + `AuthSecret:` + strings.Replace(fmt.Sprintf("%v", this.AuthSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `MaxPayloadSize:` + valueToStringGenerated(this.MaxPayloadSize) + `,`, + `}`, + }, "") + return s +} +func (this *WebhookEventSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebhookEventSource{`, + `WebhookContext:` + strings.Replace(strings.Replace(this.WebhookContext.String(), "WebhookContext", "WebhookContext", 1), `&`, ``, 1) + `,`, + `Filter:` + strings.Replace(this.Filter.String(), "EventSourceFilter", "EventSourceFilter", 1) + `,`, + `}`, + }, "") + return s +} +func 
valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AMQPConsumeConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AMQPConsumeConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AMQPConsumeConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsumerTag", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConsumerTag = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AutoAck", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AutoAck = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Exclusive", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Exclusive = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NoLocal", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.NoLocal = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NoWait", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.NoWait = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AMQPEventSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + 
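+ // Standard gogo-protobuf decode loop: read a base-128 varint tag, split it into field number (wire >> 3) and wire type (wire & 0x7), then dispatch on the field number.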
iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AMQPEventSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AMQPEventSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.URL = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExchangeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExchangeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExchangeType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExchangeType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoutingKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RoutingKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionBackoff", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConnectionBackoff == nil { + m.ConnectionBackoff = &common.Backoff{} + } + if err := m.ConnectionBackoff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field JSONBody", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.JSONBody = bool(v != 0) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TLS == nil { + m.TLS = &common.TLSConfig{} + } + if err := m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Metadata[mapkey] = mapvalue + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExchangeDeclare", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExchangeDeclare == nil { + m.ExchangeDeclare = &AMQPExchangeDeclareConfig{} + } + if err := m.ExchangeDeclare.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field QueueDeclare", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.QueueDeclare == nil { + m.QueueDeclare = &AMQPQueueDeclareConfig{} + } + if err := m.QueueDeclare.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field QueueBind", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.QueueBind == nil { + m.QueueBind = &AMQPQueueBindConfig{} + } + if err := m.QueueBind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Consume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Consume == nil { + m.Consume = &AMQPConsumeConfig{} + } + if err := m.Consume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Auth", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Auth == nil { + m.Auth = &common.BasicAuth{} + } + if err := m.Auth.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URLSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.URLSecret == nil { + m.URLSecret = &v1.SecretKeySelector{} + } + if err := m.URLSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filter == nil { + m.Filter = &EventSourceFilter{} + } + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AMQPExchangeDeclareConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AMQPExchangeDeclareConfig: 
wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AMQPExchangeDeclareConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Durable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Durable = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AutoDelete", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AutoDelete = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Internal", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Internal = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NoWait", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.NoWait = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AMQPQueueBindConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AMQPQueueBindConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AMQPQueueBindConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NoWait", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.NoWait = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AMQPQueueDeclareConfig) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AMQPQueueDeclareConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AMQPQueueDeclareConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Durable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Durable = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AutoDelete", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AutoDelete = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Exclusive", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Exclusive = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NoWait", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.NoWait = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Arguments", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
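+ // Wire type 2 is length-delimited: the varint just read gives the payload size, so the field value is the next intStringLen bytes taken verbatim.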
m.Arguments = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AzureEventsHubEventSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AzureEventsHubEventSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AzureEventsHubEventSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FQDN", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FQDN = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SharedAccessKeyName", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SharedAccessKeyName == nil { + m.SharedAccessKeyName = &v1.SecretKeySelector{} + } + if err := m.SharedAccessKeyName.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SharedAccessKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SharedAccessKey == nil { + m.SharedAccessKey = &v1.SecretKeySelector{} + } + if err := m.SharedAccessKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HubName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HubName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Metadata[mapkey] = mapvalue + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filter == nil { + m.Filter = &EventSourceFilter{} + } + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AzureQueueStorageEventSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AzureQueueStorageEventSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AzureQueueStorageEventSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageAccountName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StorageAccountName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionString", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConnectionString == nil { + m.ConnectionString = &v1.SecretKeySelector{} + } + if err := m.ConnectionString.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field QueueName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.QueueName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field JSONBody", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.JSONBody = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DLQ", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DLQ = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 
0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Metadata[mapkey] = mapvalue + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filter == nil { + m.Filter = &EventSourceFilter{} + } + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DecodeMessage", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DecodeMessage = bool(v != 0) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WaitTimeInSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.WaitTimeInSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AzureServiceBusEventSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AzureServiceBusEventSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AzureServiceBusEventSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionString", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConnectionString == nil { + m.ConnectionString = &v1.SecretKeySelector{} + } + if err := m.ConnectionString.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + 
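+ // Embedded messages decode recursively: the child Unmarshal consumes exactly the msglen bytes bounded by postIndex, and the cursor then skips past the sub-message.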
iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field QueueName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.QueueName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopicName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TopicName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SubscriptionName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SubscriptionName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TLS == nil { + m.TLS = &common.TLSConfig{} + } + if err := m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field JSONBody", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.JSONBody = bool(v != 0) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Metadata[mapkey] = mapvalue + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filter == nil { + m.Filter = &EventSourceFilter{} + } + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FullyQualifiedNamespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FullyQualifiedNamespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BitbucketAuth) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BitbucketAuth: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BitbucketAuth: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Basic", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Basic == nil { + m.Basic = &BitbucketBasicAuth{} + } + if err := m.Basic.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OAuthToken", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.OAuthToken == nil { + m.OAuthToken = &v1.SecretKeySelector{} + } + if err := m.OAuthToken.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BitbucketBasicAuth) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BitbucketBasicAuth: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BitbucketBasicAuth: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Username == nil { + m.Username = &v1.SecretKeySelector{} + } + if err := m.Username.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Password == nil { + m.Password = &v1.SecretKeySelector{} + } + if err := m.Password.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BitbucketEventSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BitbucketEventSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BitbucketEventSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeleteHookOnFinish", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DeleteHookOnFinish = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Metadata[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhook", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Webhook == nil { + m.Webhook = &WebhookContext{} + } + if err := m.Webhook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Auth", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Auth == nil { + m.Auth = &BitbucketAuth{} + } + if err := m.Auth.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedOwner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeprecatedOwner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedProjectKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeprecatedProjectKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedRepositorySlug", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeprecatedRepositorySlug = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Repositories", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Repositories = append(m.Repositories, BitbucketRepository{}) + if err := m.Repositories[len(m.Repositories)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filter == nil { + m.Filter = &EventSourceFilter{} + } + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BitbucketRepository) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BitbucketRepository: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BitbucketRepository: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RepositorySlug", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RepositorySlug = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BitbucketServerEventSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BitbucketServerEventSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BitbucketServerEventSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhook", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Webhook == nil { + m.Webhook = &WebhookContext{} + } + if err := m.Webhook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedProjectKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeprecatedProjectKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedRepositorySlug", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeprecatedRepositorySlug = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Projects", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Projects = append(m.Projects, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Repositories", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Repositories = append(m.Repositories, BitbucketServerRepository{}) + if err := m.Repositories[len(m.Repositories)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SkipBranchRefsChangedOnOpenPR", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SkipBranchRefsChangedOnOpenPR = bool(v != 0) + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AccessToken", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AccessToken == nil { + m.AccessToken = &v1.SecretKeySelector{} + } + if err := m.AccessToken.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WebhookSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WebhookSecret == nil { + m.WebhookSecret = &v1.SecretKeySelector{} + } + if err := m.WebhookSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BitbucketServerBaseURL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BitbucketServerBaseURL = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeleteHookOnFinish", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DeleteHookOnFinish = bool(v != 0) + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Metadata[mapkey] = mapvalue + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filter == nil { + m.Filter = &EventSourceFilter{} + } + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TLS == nil { + m.TLS = &common.TLSConfig{} + } + if err := m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CheckInterval", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CheckInterval = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BitbucketServerRepository) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + 
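// Each field key is a base-128 varint: every byte carries 7 payload bits and a set high bit means another byte follows; the decoded value packs the field number as wire >> 3 and the wire type as wire & 0x7. +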
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BitbucketServerRepository: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BitbucketServerRepository: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProjectKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProjectKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RepositorySlug", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RepositorySlug = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CalendarEventSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CalendarEventSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CalendarEventSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schedule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + 
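// postIndex only goes negative when iNdEx+intStringLen overflows int, so the declared length is rejected as corrupt. +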
return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Schedule = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Interval", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Interval = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExclusionDates", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExclusionDates = append(m.ExclusionDates, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timezone", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Timezone = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Metadata[mapkey] = mapvalue + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Persistence", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Persistence == nil { + m.Persistence = &EventPersistence{} + } + if err := m.Persistence.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filter == nil { + m.Filter = &EventSourceFilter{} + } + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CatchupConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CatchupConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CatchupConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Enabled = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxDuration", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MaxDuration = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapPersistence) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapPersistence: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapPersistence: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreateIfNotExist", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } 
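+ // A varint longer than ten bytes (shift >= 64) cannot fit in 64 bits, so the guard above rejects it as an overflow.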
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CreateIfNotExist = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EmitterEventSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EmitterEventSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EmitterEventSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Broker", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Broker = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
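// the buffer ended in the middle of a varint, so the encoded message is truncated +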
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Username == nil { + m.Username = &v1.SecretKeySelector{} + } + if err := m.Username.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Password == nil { + m.Password = &v1.SecretKeySelector{} + } + if err := m.Password.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionBackoff", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConnectionBackoff == nil { + m.ConnectionBackoff = &common.Backoff{} + } + if err := m.ConnectionBackoff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field JSONBody", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.JSONBody = bool(v != 0) + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TLS == nil { + m.TLS = &common.TLSConfig{} + } + if err := m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Metadata[mapkey] = mapvalue + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filter == nil { + m.Filter = &EventSourceFilter{} + } + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventPersistence) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 
0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventPersistence: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventPersistence: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Catchup", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Catchup == nil { + m.Catchup = &CatchupConfiguration{} + } + if err := m.Catchup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfigMap == nil { + m.ConfigMap = &ConfigMapPersistence{} + } + if err := m.ConfigMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil } -func (m *AMQPConsumeConfig) Unmarshal(dAtA []byte) error { +func (m *EventSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8557,17 +17117,17 @@ func (m *AMQPConsumeConfig) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AMQPConsumeConfig: wiretype end group for non-group") + return fmt.Errorf("proto: EventSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AMQPConsumeConfig: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EventSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConsumerTag", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8577,29 +17137,30 
@@ func (m *AMQPConsumeConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.ConsumerTag = string(dAtA[iNdEx:postIndex]) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AutoAck", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8609,17 +17170,30 @@ func (m *AMQPConsumeConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.AutoAck = bool(v != 0) + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Exclusive", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8629,37 +17203,80 @@ func (m *AMQPConsumeConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.Exclusive = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NoLocal", wireType) + if msglen < 0 { + return ErrInvalidLengthGenerated } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - m.NoLocal = bool(v != 0) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NoWait", wireType) + if postIndex > l { + return io.ErrUnexpectedEOF } - var v int + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventSourceFilter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum 
:= int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventSourceFilter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventSourceFilter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expression", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8669,12 +17286,24 @@ func (m *AMQPConsumeConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.NoWait = bool(v != 0) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -8696,7 +17325,7 @@ func (m *AMQPConsumeConfig) Unmarshal(dAtA []byte) error { } return nil } -func (m *AMQPEventSource) Unmarshal(dAtA []byte) error { +func (m *EventSourceList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8719,17 +17348,17 @@ func (m *AMQPEventSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AMQPEventSource: wiretype end group for non-group") + return fmt.Errorf("proto: EventSourceList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AMQPEventSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EventSourceList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8739,29 +17368,30 @@ func (m *AMQPEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.URL = string(dAtA[iNdEx:postIndex]) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExchangeName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8771,27 +17401,79 @@ func (m *AMQPEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + 
msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.ExchangeName = string(dAtA[iNdEx:postIndex]) + m.Items = append(m.Items, EventSource{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventSourceSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventSourceSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExchangeType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EventBusName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -8819,13 +17501,13 @@ func (m *AMQPEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ExchangeType = string(dAtA[iNdEx:postIndex]) + m.EventBusName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RoutingKey", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8835,27 +17517,31 @@ func (m *AMQPEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.RoutingKey = string(dAtA[iNdEx:postIndex]) + if m.Template == nil { + m.Template = &Template{} + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConnectionBackoff", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8882,18 +17568,18 @@ func (m *AMQPEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ConnectionBackoff == nil { - m.ConnectionBackoff = &common.Backoff{} + if m.Service == nil { + m.Service = &Service{} } - if err := m.ConnectionBackoff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return 
err } iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field JSONBody", wireType) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Minio", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8903,15 +17589,124 @@ func (m *AMQPEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.JSONBody = bool(v != 0) - case 7: + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Minio == nil { + m.Minio = make(map[string]common.S3Artifact) + } + var mapkey string + mapvalue := &common.S3Artifact{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &common.S3Artifact{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Minio[mapkey] = *mapvalue + iNdEx = postIndex + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Calendar", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8928,26 +17723,119 @@ func (m *AMQPEventSource) Unmarshal(dAtA []byte) error { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TLS == nil { - m.TLS = &common.TLSConfig{} - } - if err := 
m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Calendar == nil { + m.Calendar = make(map[string]CalendarEventSource) + } + var mapkey string + mapvalue := &CalendarEventSource{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &CalendarEventSource{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Calendar[mapkey] = *mapvalue iNdEx = postIndex - case 8: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field File", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8974,11 +17862,11 @@ func (m *AMQPEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Metadata == nil { - m.Metadata = make(map[string]string) + if m.File == nil { + m.File = make(map[string]FileEventSource) } var mapkey string - var mapvalue string + mapvalue := &FileEventSource{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -9027,7 +17915,7 @@ func (m *AMQPEventSource) Unmarshal(dAtA []byte) error { mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey } else if fieldNum == 2 { - var stringLenmapvalue uint64 + var mapmsglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9037,24 +17925,26 @@ func (m *AMQPEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) 
<< shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { + if mapmsglen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } - if postStringIndexmapvalue > l { + if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue + mapvalue = &FileEventSource{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex } else { iNdEx = entryPreIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -9070,11 +17960,11 @@ func (m *AMQPEventSource) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.Metadata[mapkey] = mapvalue + m.File[mapkey] = *mapvalue iNdEx = postIndex - case 9: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExchangeDeclare", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9101,52 +17991,109 @@ func (m *AMQPEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ExchangeDeclare == nil { - m.ExchangeDeclare = &AMQPExchangeDeclareConfig{} - } - if err := m.ExchangeDeclare.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field QueueDeclare", wireType) + if m.Resource == nil { + m.Resource = make(map[string]ResourceEventSource) } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var mapkey string + mapvalue := &ResourceEventSource{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if 
postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ResourceEventSource{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.QueueDeclare == nil { - m.QueueDeclare = &AMQPQueueDeclareConfig{} - } - if err := m.QueueDeclare.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Resource[mapkey] = *mapvalue iNdEx = postIndex - case 11: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field QueueBind", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Webhook", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9173,52 +18120,109 @@ func (m *AMQPEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.QueueBind == nil { - m.QueueBind = &AMQPQueueBindConfig{} - } - if err := m.QueueBind.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Consume", wireType) + if m.Webhook == nil { + m.Webhook = make(map[string]WebhookEventSource) } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var mapkey string + mapvalue := &WebhookEventSource{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &WebhookEventSource{} + if err := 
mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Consume == nil { - m.Consume = &AMQPConsumeConfig{} - } - if err := m.Consume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Webhook[mapkey] = *mapvalue iNdEx = postIndex - case 13: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Auth", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AMQP", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9245,128 +18249,111 @@ func (m *AMQPEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Auth == nil { - m.Auth = &common.BasicAuth{} - } - if err := m.Auth.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AMQPExchangeDeclareConfig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AMQPExchangeDeclareConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AMQPExchangeDeclareConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Durable", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Durable = bool(v != 0) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AutoDelete", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AutoDelete = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Internal", wireType) + if m.AMQP == nil { + m.AMQP = make(map[string]AMQPEventSource) } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var mapkey string + mapvalue := &AMQPEventSource{} + 
for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &AMQPEventSource{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - m.Internal = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NoWait", wireType) + m.AMQP[mapkey] = *mapvalue + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kafka", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9376,137 +18363,126 @@ func (m *AMQPExchangeDeclareConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.NoWait = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AMQPQueueBindConfig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: 
AMQPQueueBindConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AMQPQueueBindConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NoWait", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.NoWait = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AMQPQueueDeclareConfig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if m.Kafka == nil { + m.Kafka = make(map[string]KafkaEventSource) } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AMQPQueueDeclareConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AMQPQueueDeclareConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + var mapkey string + mapvalue := &KafkaEventSource{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &KafkaEventSource{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = 
entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Kafka[mapkey] = *mapvalue + iNdEx = postIndex + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MQTT", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9516,189 +18492,124 @@ func (m *AMQPQueueDeclareConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Durable", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Durable = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AutoDelete", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AutoDelete = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Exclusive", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Exclusive = bool(v != 0) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NoWait", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.NoWait = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AzureEventsHubEventSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 
4 { - return fmt.Errorf("proto: AzureEventsHubEventSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AzureEventsHubEventSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FQDN", wireType) + if m.MQTT == nil { + m.MQTT = make(map[string]MQTTEventSource) } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var mapkey string + mapvalue := &MQTTEventSource{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &MQTTEventSource{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FQDN = string(dAtA[iNdEx:postIndex]) + m.MQTT[mapkey] = *mapvalue iNdEx = postIndex - case 2: + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SharedAccessKeyName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NATS", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9725,16 +18636,109 @@ func (m *AzureEventsHubEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SharedAccessKeyName == nil { - m.SharedAccessKeyName = &v1.SecretKeySelector{} + if m.NATS == nil { + m.NATS = 
make(map[string]NATSEventsSource) } - if err := m.SharedAccessKeyName.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var mapkey string + mapvalue := &NATSEventsSource{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &NATSEventsSource{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.NATS[mapkey] = *mapvalue iNdEx = postIndex - case 3: + case 13: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SharedAccessKey", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SNS", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9761,48 +18765,109 @@ func (m *AzureEventsHubEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SharedAccessKey == nil { - m.SharedAccessKey = &v1.SecretKeySelector{} - } - if err := m.SharedAccessKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HubName", wireType) + if m.SNS == nil { + m.SNS = make(map[string]SNSEventSource) } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var mapkey string + mapvalue := &SNSEventSource{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) 
<< shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &SNSEventSource{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.HubName = string(dAtA[iNdEx:postIndex]) + m.SNS[mapkey] = *mapvalue iNdEx = postIndex - case 5: + case 14: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SQS", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9829,11 +18894,11 @@ func (m *AzureEventsHubEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Metadata == nil { - m.Metadata = make(map[string]string) + if m.SQS == nil { + m.SQS = make(map[string]SQSEventSource) } var mapkey string - var mapvalue string + mapvalue := &SQSEventSource{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -9882,7 +18947,7 @@ func (m *AzureEventsHubEventSource) Unmarshal(dAtA []byte) error { mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey } else if fieldNum == 2 { - var stringLenmapvalue uint64 + var mapmsglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9892,24 +18957,26 @@ func (m *AzureEventsHubEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { + if mapmsglen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { return 
ErrInvalidLengthGenerated } - if postStringIndexmapvalue > l { + if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue + mapvalue = &SQSEventSource{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex } else { iNdEx = entryPreIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -9925,159 +18992,13 @@ func (m *AzureEventsHubEventSource) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.Metadata[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CalendarEventSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CalendarEventSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CalendarEventSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Schedule", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Schedule = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Interval", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Interval = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExclusionDates", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if 
postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExclusionDates = append(m.ExclusionDates, string(dAtA[iNdEx:postIndex])) + m.SQS[mapkey] = *mapvalue iNdEx = postIndex - case 4: + case 15: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timezone", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PubSub", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10087,61 +19008,124 @@ func (m *CalendarEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Timezone = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UserPayload", wireType) + if m.PubSub == nil { + m.PubSub = make(map[string]PubSubEventSource) } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var mapkey string + mapvalue := &PubSubEventSource{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &PubSubEventSource{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx 
+= skippy } } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UserPayload = append(m.UserPayload[:0], dAtA[iNdEx:postIndex]...) - if m.UserPayload == nil { - m.UserPayload = []byte{} - } + m.PubSub[mapkey] = *mapvalue iNdEx = postIndex - case 6: + case 16: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Github", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10168,11 +19152,11 @@ func (m *CalendarEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Metadata == nil { - m.Metadata = make(map[string]string) + if m.Github == nil { + m.Github = make(map[string]GithubEventSource) } var mapkey string - var mapvalue string + mapvalue := &GithubEventSource{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -10221,7 +19205,7 @@ func (m *CalendarEventSource) Unmarshal(dAtA []byte) error { mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey } else if fieldNum == 2 { - var stringLenmapvalue uint64 + var mapmsglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10231,24 +19215,26 @@ func (m *CalendarEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { + if mapmsglen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } - if postStringIndexmapvalue > l { + if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue + mapvalue = &GithubEventSource{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex } else { iNdEx = entryPreIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -10264,11 +19250,11 @@ func (m *CalendarEventSource) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.Metadata[mapkey] = mapvalue + m.Github[mapkey] = *mapvalue iNdEx = postIndex - case 7: + case 17: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Persistence", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Gitlab", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10295,170 +19281,111 @@ func (m *CalendarEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Persistence == nil { - m.Persistence = &EventPersistence{} - } - if err := m.Persistence.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CatchupConfiguration) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex 
:= iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CatchupConfiguration: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CatchupConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Enabled = bool(v != 0) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxDuration", wireType) + if m.Gitlab == nil { + m.Gitlab = make(map[string]GitlabEventSource) } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var mapkey string + mapvalue := &GitlabEventSource{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &GitlabEventSource{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx 
+ intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MaxDuration = string(dAtA[iNdEx:postIndex]) + m.Gitlab[mapkey] = *mapvalue iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConfigMapPersistence) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConfigMapPersistence: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapPersistence: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 18: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HDFS", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10468,99 +19395,126 @@ func (m *ConfigMapPersistence) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CreateIfNotExist", wireType) + if m.HDFS == nil { + m.HDFS = make(map[string]HDFSEventSource) } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break + var mapkey string + mapvalue := &HDFSEventSource{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - } - m.CreateIfNotExist = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EmitterEventSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - 
} - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EmitterEventSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EmitterEventSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &HDFSEventSource{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.HDFS[mapkey] = *mapvalue + iNdEx = postIndex + case 19: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Broker", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Slack", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10570,61 +19524,126 @@ func (m *EmitterEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Broker = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChannelKey", wireType) + if m.Slack == nil { + m.Slack = make(map[string]SlackEventSource) } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var mapkey string + mapvalue := &SlackEventSource{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &SlackEventSource{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ChannelKey = string(dAtA[iNdEx:postIndex]) + m.Slack[mapkey] = *mapvalue iNdEx = postIndex - case 3: + case 20: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChannelName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StorageGrid", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10634,63 +19653,124 @@ func (m *EmitterEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.ChannelName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + if m.StorageGrid == nil { + m.StorageGrid = make(map[string]StorageGridEventSource) } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var mapkey 
string + mapvalue := &StorageGridEventSource{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &StorageGridEventSource{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Username == nil { - m.Username = &v1.SecretKeySelector{} - } - if err := m.Username.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.StorageGrid[mapkey] = *mapvalue iNdEx = postIndex - case 5: + case 21: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AzureEventsHub", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10717,16 +19797,109 @@ func (m *EmitterEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Password == nil { - m.Password = &v1.SecretKeySelector{} + if m.AzureEventsHub == nil { + m.AzureEventsHub = make(map[string]AzureEventsHubEventSource) } - if err := m.Password.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var mapkey string + mapvalue := &AzureEventsHubEventSource{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire 
>> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &AzureEventsHubEventSource{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.AzureEventsHub[mapkey] = *mapvalue iNdEx = postIndex - case 6: + case 22: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConnectionBackoff", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Stripe", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10753,72 +19926,109 @@ func (m *EmitterEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ConnectionBackoff == nil { - m.ConnectionBackoff = &common.Backoff{} - } - if err := m.ConnectionBackoff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field JSONBody", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.JSONBody = bool(v != 0) - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + if m.Stripe == nil { + m.Stripe = make(map[string]StripeEventSource) } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var mapkey string + mapvalue := &StripeEventSource{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &StripeEventSource{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TLS == nil { - m.TLS = &common.TLSConfig{} - } - if err := m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Stripe[mapkey] = *mapvalue iNdEx = postIndex - case 9: + case 23: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Emitter", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10845,11 +20055,11 @@ func (m *EmitterEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Metadata == nil { - m.Metadata = make(map[string]string) + if m.Emitter == nil { + m.Emitter = make(map[string]EmitterEventSource) } var mapkey string - var mapvalue string + mapvalue := &EmitterEventSource{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -10898,7 +20108,7 @@ func (m *EmitterEventSource) Unmarshal(dAtA []byte) error { mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey } else if fieldNum == 2 { - var stringLenmapvalue uint64 + var mapmsglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10908,24 +20118,26 @@ func (m *EmitterEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift + mapmsglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { + if mapmsglen < 0 { return ErrInvalidLengthGenerated } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { return ErrInvalidLengthGenerated } - if postStringIndexmapvalue > l { + if postmsgIndex 
> l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue + mapvalue = &EmitterEventSource{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex } else { iNdEx = entryPreIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -10937,220 +20149,15 @@ func (m *EmitterEventSource) Unmarshal(dAtA []byte) error { } if (iNdEx + skippy) > postIndex { return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Metadata[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EventPersistence) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EventPersistence: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EventPersistence: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Catchup", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Catchup == nil { - m.Catchup = &CatchupConfiguration{} - } - if err := m.Catchup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ConfigMap == nil { - m.ConfigMap = &ConfigMapPersistence{} - } - if err := m.ConfigMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EventSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < 
l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EventSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EventSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break + } + iNdEx += skippy } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Emitter[mapkey] = *mapvalue iNdEx = postIndex - case 2: + case 24: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Redis", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11177,13 +20184,109 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if m.Redis == nil { + m.Redis = make(map[string]RedisEventSource) + } + var mapkey string + mapvalue := &RedisEventSource{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &RedisEventSource{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + 
iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.Redis[mapkey] = *mapvalue iNdEx = postIndex - case 3: + case 25: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NSQ", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11210,63 +20313,109 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EventSourceList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + if m.NSQ == nil { + m.NSQ = make(map[string]NSQEventSource) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var mapkey string + mapvalue := &NSQEventSource{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &NSQEventSource{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if 
(iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EventSourceList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EventSourceList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.NSQ[mapkey] = *mapvalue + iNdEx = postIndex + case 26: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Pulsar", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11293,13 +20442,109 @@ func (m *EventSourceList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if m.Pulsar == nil { + m.Pulsar = make(map[string]PulsarEventSource) + } + var mapkey string + mapvalue := &PulsarEventSource{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &PulsarEventSource{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.Pulsar[mapkey] = *mapvalue iNdEx = postIndex - case 2: + case 27: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Generic", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11326,66 +20571,111 @@ func (m *EventSourceList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, EventSource{}) - if err := 
m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + if m.Generic == nil { + m.Generic = make(map[string]GenericEventSource) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var mapkey string + mapvalue := &GenericEventSource{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &GenericEventSource{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EventSourceSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EventSourceSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EventBusName", wireType) + m.Generic[mapkey] = *mapvalue + iNdEx = postIndex + case 28: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) } - var stringLen uint64 + var v 
int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11395,27 +20685,15 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.EventBusName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + m.Replicas = &v + case 29: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field BitbucketServer", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11442,16 +20720,109 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Template == nil { - m.Template = &Template{} + if m.BitbucketServer == nil { + m.BitbucketServer = make(map[string]BitbucketServerEventSource) } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var mapkey string + mapvalue := &BitbucketServerEventSource{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &BitbucketServerEventSource{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.BitbucketServer[mapkey] = *mapvalue iNdEx = postIndex - case 3: + case 30: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Bitbucket", 
wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11478,36 +20849,109 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Service == nil { - m.Service = &Service{} - } - if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedReplica", wireType) + if m.Bitbucket == nil { + m.Bitbucket = make(map[string]BitbucketEventSource) } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var mapkey string + mapvalue := &BitbucketEventSource{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &BitbucketEventSource{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - m.DeprecatedReplica = &v - case 5: + m.Bitbucket[mapkey] = *mapvalue + iNdEx = postIndex + case 31: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Minio", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RedisStream", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11534,11 +20978,11 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Minio == nil { - m.Minio = make(map[string]common.S3Artifact) + if m.RedisStream == nil { + m.RedisStream = make(map[string]RedisStreamEventSource) } var mapkey string - mapvalue := &common.S3Artifact{} + mapvalue := &RedisStreamEventSource{} for iNdEx < 
postIndex { entryPreIndex := iNdEx var wire uint64 @@ -11612,7 +21056,7 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &common.S3Artifact{} + mapvalue = &RedisStreamEventSource{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -11632,11 +21076,11 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.Minio[mapkey] = *mapvalue + m.RedisStream[mapkey] = *mapvalue iNdEx = postIndex - case 6: + case 32: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Calendar", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AzureServiceBus", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11663,11 +21107,11 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Calendar == nil { - m.Calendar = make(map[string]CalendarEventSource) + if m.AzureServiceBus == nil { + m.AzureServiceBus = make(map[string]AzureServiceBusEventSource) } var mapkey string - mapvalue := &CalendarEventSource{} + mapvalue := &AzureServiceBusEventSource{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -11741,7 +21185,7 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &CalendarEventSource{} + mapvalue = &AzureServiceBusEventSource{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -11761,11 +21205,11 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.Calendar[mapkey] = *mapvalue + m.AzureServiceBus[mapkey] = *mapvalue iNdEx = postIndex - case 7: + case 33: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field File", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AzureQueueStorage", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11792,11 +21236,11 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.File == nil { - m.File = make(map[string]FileEventSource) + if m.AzureQueueStorage == nil { + m.AzureQueueStorage = make(map[string]AzureQueueStorageEventSource) } var mapkey string - mapvalue := &FileEventSource{} + mapvalue := &AzureQueueStorageEventSource{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -11870,7 +21314,7 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &FileEventSource{} + mapvalue = &AzureQueueStorageEventSource{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -11890,11 +21334,11 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.File[mapkey] = *mapvalue + m.AzureQueueStorage[mapkey] = *mapvalue iNdEx = postIndex - case 8: + case 34: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SFTP", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11921,11 +21365,11 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Resource == nil { - m.Resource = make(map[string]ResourceEventSource) + if m.SFTP == nil { + m.SFTP = make(map[string]SFTPEventSource) } var mapkey string - mapvalue := &ResourceEventSource{} + mapvalue := &SFTPEventSource{} 
for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -11999,7 +21443,7 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &ResourceEventSource{} + mapvalue = &SFTPEventSource{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -12019,11 +21463,11 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.Resource[mapkey] = *mapvalue + m.SFTP[mapkey] = *mapvalue iNdEx = postIndex - case 9: + case 35: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Webhook", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Gerrit", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12050,11 +21494,11 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Webhook == nil { - m.Webhook = make(map[string]WebhookContext) + if m.Gerrit == nil { + m.Gerrit = make(map[string]GerritEventSource) } var mapkey string - mapvalue := &WebhookContext{} + mapvalue := &GerritEventSource{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -12128,7 +21572,7 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &WebhookContext{} + mapvalue = &GerritEventSource{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -12148,11 +21592,229 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.Webhook[mapkey] = *mapvalue + m.Gerrit[mapkey] = *mapvalue iNdEx = postIndex - case 10: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventSourceStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventSourceStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventSourceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AMQP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil 
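// ---- reviewer aside: illustrative sketch, not part of the generated diff ----
// The long inner loops above (mapkey / mapvalue, fieldNum == 1 / fieldNum == 2)
// exist because proto maps such as map[string]SFTPEventSource or
// map[string]GerritEventSource are encoded as repeated Entry submessages:
// key = field 1, value = field 2, both length-delimited. A compact equivalent
// that pulls the key and the raw value bytes out of one entry; the generated
// code would then call Unmarshal on the value bytes. Names are mine:

package sketch

import (
	"encoding/binary"
	"fmt"
)

func decodeMapEntry(entry []byte) (key string, value []byte, err error) {
	i := 0
	for i < len(entry) {
		tag, n := binary.Uvarint(entry[i:]) // field number + wire type
		if n <= 0 {
			return "", nil, fmt.Errorf("bad tag varint")
		}
		i += n
		fieldNum, wireType := tag>>3, tag&0x7
		if wireType != 2 { // key and value are both length-delimited here
			return "", nil, fmt.Errorf("unexpected wire type %d", wireType)
		}
		l, n := binary.Uvarint(entry[i:])
		if n <= 0 || int(l) < 0 || i+n+int(l) > len(entry) {
			return "", nil, fmt.Errorf("truncated entry")
		}
		i += n
		body := entry[i : i+int(l)]
		i += int(l)
		switch fieldNum {
		case 1:
			key = string(body)
		case 2:
			value = body // e.g. feed this to (&SFTPEventSource{}).Unmarshal
		}
	}
	return key, value, nil
}

// Example: []byte{0x0A, 0x01, 'k', 0x12, 0x02, 0x08, 0x01} decodes to key "k"
// and value bytes {0x08, 0x01}.
// ------------------------------------------------------------------------------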
{ + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FileEventSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FileEventSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FileEventSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EventType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EventType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WatchPathConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.WatchPathConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Polling", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Polling = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12179,11 +21841,11 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.AMQP == nil { - m.AMQP = make(map[string]AMQPEventSource) + if m.Metadata == nil { + m.Metadata = make(map[string]string) } var mapkey string - mapvalue := &AMQPEventSource{} + var mapvalue string for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -12232,7 +21894,7 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey } else if fieldNum == 2 { - var mapmsglen int + var stringLenmapvalue uint64 for shift := uint(0); ; shift += 
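// ---- reviewer aside: illustrative sketch, not part of the generated diff ----
// String fields such as FileEventSource.EventType above are wire type 2: a
// varint length followed by that many bytes. The generated intStringLen /
// postIndex checks reject negative lengths and truncated buffers; a compact
// equivalent (same package and imports as the decodeMapEntry sketch):

func decodeString(data []byte, i int) (string, int, error) {
	l, n := binary.Uvarint(data[i:])
	if n <= 0 {
		return "", i, fmt.Errorf("bad length varint")
	}
	i += n
	end := i + int(l)
	if int(l) < 0 || end > len(data) { // ErrInvalidLengthGenerated / io.ErrUnexpectedEOF
		return "", i, fmt.Errorf("invalid or truncated string")
	}
	return string(data[i:end]), end, nil
}
// ------------------------------------------------------------------------------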
7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12242,26 +21904,24 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= int(b&0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if mapmsglen < 0 { + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { return ErrInvalidLengthGenerated } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { return ErrInvalidLengthGenerated } - if postmsgIndex > l { + if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } - mapvalue = &AMQPEventSource{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue } else { iNdEx = entryPreIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -12277,11 +21937,11 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.AMQP[mapkey] = *mapvalue + m.Metadata[mapkey] = mapvalue iNdEx = postIndex - case 11: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kafka", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12308,109 +21968,170 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Kafka == nil { - m.Kafka = make(map[string]KafkaEventSource) + if m.Filter == nil { + m.Filter = &EventSourceFilter{} } - var mapkey string - mapvalue := &KafkaEventSource{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GenericEventSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenericEventSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenericEventSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &KafkaEventSource{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.URL = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Config = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Insecure", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break } } - m.Kafka[mapkey] = *mapvalue - iNdEx = postIndex - case 12: + m.Insecure = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field JSONBody", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.JSONBody = bool(v != 0) + case 5: if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field MQTT", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12437,11 +22158,11 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.MQTT == nil { - m.MQTT = make(map[string]MQTTEventSource) + if m.Metadata == nil { + m.Metadata = make(map[string]string) } var mapkey string - mapvalue := &MQTTEventSource{} + var mapvalue string for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -12490,7 +22211,7 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey } else if fieldNum == 2 { - var mapmsglen int + var stringLenmapvalue uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12500,26 +22221,24 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= int(b&0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if mapmsglen < 0 { + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { return ErrInvalidLengthGenerated } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { return ErrInvalidLengthGenerated } - if postmsgIndex > l { + if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } - mapvalue = &MQTTEventSource{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue } else { iNdEx = entryPreIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -12535,11 +22254,233 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.MQTT[mapkey] = *mapvalue + m.Metadata[mapkey] = mapvalue + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthSecret == nil { + m.AuthSecret = &v1.SecretKeySelector{} + } + if err := m.AuthSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filter == nil { + m.Filter = &EventSourceFilter{} + } + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx 
= preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GerritEventSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GerritEventSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GerritEventSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhook", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Webhook == nil { + m.Webhook = &WebhookContext{} + } + if err := m.Webhook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HookName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HookName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 13: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NATS", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Auth", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12566,111 +22507,18 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.NATS == nil 
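// ---- reviewer aside: illustrative sketch, not part of the generated diff ----
// Singular message fields (Webhook, Auth, Filter, ...) all follow the pattern
// visible above: lazily allocate the target if nil, then recurse into
// Unmarshal on the length-delimited sub-slice. A sketch with a local
// interface standing in for the generated types (same package and imports as
// the decodeMapEntry sketch):

type unmarshaler interface{ Unmarshal([]byte) error }

func decodeMessage(data []byte, i int, into unmarshaler) (int, error) {
	l, n := binary.Uvarint(data[i:])
	if n <= 0 || int(l) < 0 || i+n+int(l) > len(data) {
		return i, fmt.Errorf("invalid or truncated message field")
	}
	i += n
	if err := into.Unmarshal(data[i : i+int(l)]); err != nil {
		return i, err
	}
	return i + int(l), nil
}

// Usage mirrors the generated code: allocate with
// `if m.Webhook == nil { m.Webhook = &WebhookContext{} }`, then call
// `decodeMessage(dAtA, iNdEx, m.Webhook)`.
// ------------------------------------------------------------------------------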
{ - m.NATS = make(map[string]NATSEventsSource) + if m.Auth == nil { + m.Auth = &common.BasicAuth{} } - var mapkey string - mapvalue := &NATSEventsSource{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &NATSEventsSource{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if err := m.Auth.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.NATS[mapkey] = *mapvalue iNdEx = postIndex - case 14: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SNS", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field GerritBaseURL", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12680,124 +22528,47 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.SNS == nil { - m.SNS = make(map[string]SNSEventSource) + m.GerritBaseURL = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeleteHookOnFinish", wireType) } - var mapkey string - mapvalue := &SNSEventSource{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &SNSEventSource{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break } } - m.SNS[mapkey] = *mapvalue - iNdEx = postIndex - case 15: + m.DeleteHookOnFinish = bool(v != 0) + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SQS", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12824,11 +22595,11 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SQS == nil { - m.SQS = make(map[string]SQSEventSource) + if m.Metadata == nil { + m.Metadata = make(map[string]string) } var mapkey string - mapvalue := &SQSEventSource{} + var mapvalue string for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -12877,7 +22648,7 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey } else if fieldNum == 2 { - var mapmsglen int + var stringLenmapvalue uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -12887,26 +22658,24 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= int(b&0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if mapmsglen < 0 { + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { return ErrInvalidLengthGenerated } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if 
postStringIndexmapvalue < 0 { return ErrInvalidLengthGenerated } - if postmsgIndex > l { + if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } - mapvalue = &SQSEventSource{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue } else { iNdEx = entryPreIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -12922,11 +22691,292 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.SQS[mapkey] = *mapvalue - iNdEx = postIndex - case 16: + m.Metadata[mapkey] = mapvalue + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Projects", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Projects = append(m.Projects, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SslVerify", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SslVerify = bool(v != 0) + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filter == nil { + m.Filter = &EventSourceFilter{} + } + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GithubAppCreds) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GithubAppCreds: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GithubAppCreds: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + 
case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PrivateKey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PrivateKey == nil { + m.PrivateKey = &v1.SecretKeySelector{} + } + if err := m.PrivateKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AppID", wireType) + } + m.AppID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AppID |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InstallationID", wireType) + } + m.InstallationID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.InstallationID |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GithubEventSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GithubEventSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GithubEventSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PubSub", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Webhook", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12953,111 +23003,18 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.PubSub == nil { - m.PubSub = make(map[string]PubSubEventSource) + if m.Webhook == nil { + m.Webhook = &WebhookContext{} } - var mapkey string - mapvalue := &PubSubEventSource{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := 
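// ---- reviewer aside: illustrative sketch, not part of the generated diff ----
// AppID, InstallationID and GithubEventSource.ID above are proto int64 fields
// (wire type 0): a plain varint reinterpreted as int64, hence
// `m.AppID |= int64(b&0x7F) << shift`. This is not the zigzag encoding (that
// would be sint64), so a negative value always occupies ten bytes on the
// wire. Compact equivalent (same package and imports as the earlier sketches):

func decodeInt64(data []byte, i int) (int64, int, error) {
	u, n := binary.Uvarint(data[i:]) // same base-128 varint as everywhere else
	if n <= 0 {
		return 0, i, fmt.Errorf("bad varint")
	}
	return int64(u), i + n, nil
}
// ------------------------------------------------------------------------------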
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &PubSubEventSource{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if err := m.Webhook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.PubSub[mapkey] = *mapvalue iNdEx = postIndex - case 17: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Github", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedOwner", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13067,126 +23024,29 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Github == nil { - m.Github = make(map[string]GithubEventSource) - } - var mapkey string - mapvalue := &GithubEventSource{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey 
:= int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &GithubEventSource{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Github[mapkey] = *mapvalue + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeprecatedOwner = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 18: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Gitlab", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedRepository", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13196,124 +23056,59 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Gitlab == nil { - m.Gitlab = make(map[string]GitlabEventSource) + m.DeprecatedRepository = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) } - var mapkey string - mapvalue := &GitlabEventSource{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - 
intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &GitlabEventSource{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break } } - m.Gitlab[mapkey] = *mapvalue + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 19: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HDFS", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field APIToken", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13340,109 +23135,16 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.HDFS == nil { - m.HDFS = make(map[string]HDFSEventSource) + if m.APIToken == nil { + m.APIToken = &v1.SecretKeySelector{} } - var mapkey string - mapvalue := &HDFSEventSource{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
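// ---- reviewer aside ----------------------------------------------------------
// Repeated string fields (Events, Projects, Organizations) carry no length
// prefix for the list as a whole: each occurrence of the tag contributes one
// element, hence the append above. With decodeString from the earlier sketch,
// one occurrence inside the field switch would reduce to roughly:
//
//	s, next, err := decodeString(dAtA, iNdEx)
//	if err != nil {
//		return err
//	}
//	m.Events = append(m.Events, s)
//	iNdEx = next
// ------------------------------------------------------------------------------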
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &HDFSEventSource{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if err := m.APIToken.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.HDFS[mapkey] = *mapvalue iNdEx = postIndex - case 20: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Slack", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field WebhookSecret", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13469,111 +23171,58 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Slack == nil { - m.Slack = make(map[string]SlackEventSource) + if m.WebhookSecret == nil { + m.WebhookSecret = &v1.SecretKeySelector{} } - var mapkey string - mapvalue := &SlackEventSource{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.WebhookSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Insecure", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &SlackEventSource{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } 
else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break } } - m.Slack[mapkey] = *mapvalue - iNdEx = postIndex - case 21: + m.Insecure = bool(v != 0) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Active = bool(v != 0) + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StorageGrid", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ContentType", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13583,126 +23232,29 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.StorageGrid == nil { - m.StorageGrid = make(map[string]StorageGridEventSource) - } - var mapkey string - mapvalue := &StorageGridEventSource{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &StorageGridEventSource{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - 
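// ---- reviewer aside: illustrative sketch, not part of the generated diff ----
// Bool fields (Insecure, Active, SslVerify, DeleteHookOnFinish) are wire type
// 0 varints; the generated code accepts any nonzero value as true, hence
// `m.Insecure = bool(v != 0)`. Compact equivalent (same package and imports
// as the earlier sketches):

func decodeBool(data []byte, i int) (bool, int, error) {
	u, n := binary.Uvarint(data[i:])
	if n <= 0 {
		return false, i, fmt.Errorf("bad varint")
	}
	return u != 0, i + n, nil
}
// ------------------------------------------------------------------------------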
iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.StorageGrid[mapkey] = *mapvalue + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContentType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 22: + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AzureEventsHub", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field GithubBaseURL", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13712,126 +23264,29 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.AzureEventsHub == nil { - m.AzureEventsHub = make(map[string]AzureEventsHubEventSource) - } - var mapkey string - mapvalue := &AzureEventsHubEventSource{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &AzureEventsHubEventSource{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.AzureEventsHub[mapkey] = *mapvalue + m.GithubBaseURL = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 23: + case 12: if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field Stripe", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field GithubUploadURL", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -13841,124 +23296,47 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Stripe == nil { - m.Stripe = make(map[string]StripeEventSource) + m.GithubUploadURL = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeleteHookOnFinish", wireType) } - var mapkey string - mapvalue := &StripeEventSource{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &StripeEventSource{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break } } - m.Stripe[mapkey] = *mapvalue - iNdEx = postIndex - case 24: + m.DeleteHookOnFinish = bool(v != 0) + case 14: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Emitter", wireType) + return fmt.Errorf("proto: wrong wireType = 
%d for field Metadata", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13985,11 +23363,11 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Emitter == nil { - m.Emitter = make(map[string]EmitterEventSource) + if m.Metadata == nil { + m.Metadata = make(map[string]string) } var mapkey string - mapvalue := &EmitterEventSource{} + var mapvalue string for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -14038,7 +23416,7 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey } else if fieldNum == 2 { - var mapmsglen int + var stringLenmapvalue uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14048,26 +23426,24 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= int(b&0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if mapmsglen < 0 { + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { return ErrInvalidLengthGenerated } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { return ErrInvalidLengthGenerated } - if postmsgIndex > l { + if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } - mapvalue = &EmitterEventSource{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue } else { iNdEx = entryPreIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -14083,13 +23459,270 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.Emitter[mapkey] = *mapvalue + m.Metadata[mapkey] = mapvalue + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Repositories", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Repositories = append(m.Repositories, OwnedRepositories{}) + if err := m.Repositories[len(m.Repositories)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Organizations", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Organizations = append(m.Organizations, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
GithubApp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GithubApp == nil { + m.GithubApp = &GithubAppCreds{} + } + if err := m.GithubApp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filter == nil { + m.Filter = &EventSourceFilter{} + } + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PayloadEnrichment", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PayloadEnrichment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GitlabEventSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GitlabEventSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GitlabEventSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhook", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated 
+ } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Webhook == nil { + m.Webhook = &WebhookContext{} + } + if err := m.Webhook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 25: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Redis", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedProjectID", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14099,124 +23732,59 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Redis == nil { - m.Redis = make(map[string]RedisEventSource) + m.DeprecatedProjectID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) } - var mapkey string - mapvalue := &RedisEventSource{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &RedisEventSource{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break } } - m.Redis[mapkey] = *mapvalue + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 26: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NSQ", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AccessToken", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14243,109 +23811,88 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.NSQ == nil { - m.NSQ = make(map[string]NSQEventSource) + if m.AccessToken == nil { + m.AccessToken = &v1.SecretKeySelector{} } - var mapkey string - mapvalue := &NSQEventSource{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &NSQEventSource{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if err := m.AccessToken.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EnableSSLVerification", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break } } - m.NSQ[mapkey] = *mapvalue + m.EnableSSLVerification = bool(v != 
0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GitlabBaseURL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GitlabBaseURL = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 27: + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeleteHookOnFinish", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DeleteHookOnFinish = bool(v != 0) + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pulsar", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14372,11 +23919,11 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Pulsar == nil { - m.Pulsar = make(map[string]PulsarEventSource) + if m.Metadata == nil { + m.Metadata = make(map[string]string) } var mapkey string - mapvalue := &PulsarEventSource{} + var mapvalue string for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -14425,7 +23972,7 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey } else if fieldNum == 2 { - var mapmsglen int + var stringLenmapvalue uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14435,26 +23982,24 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - mapmsglen |= int(b&0x7F) << shift + stringLenmapvalue |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if mapmsglen < 0 { + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { return ErrInvalidLengthGenerated } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { return ErrInvalidLengthGenerated } - if postmsgIndex > l { + if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } - mapvalue = &PulsarEventSource{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue } else { iNdEx = entryPreIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -14470,11 +24015,43 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.Pulsar[mapkey] = *mapvalue + m.Metadata[mapkey] = mapvalue iNdEx = postIndex - case 28: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Generic", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Projects", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l 
{ + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Projects = append(m.Projects, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretToken", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14501,111 +24078,54 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Generic == nil { - m.Generic = make(map[string]GenericEventSource) + if m.SecretToken == nil { + m.SecretToken = &v1.SecretKeySelector{} } - var mapkey string - mapvalue := &GenericEventSource{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.SecretToken.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &GenericEventSource{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break } } - m.Generic[mapkey] = *mapvalue + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + 
return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filter == nil { + m.Filter = &EventSourceFilter{} + } + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 29: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) } - var v int32 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14615,12 +24135,24 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.Replicas = &v + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -14642,7 +24174,7 @@ func (m *EventSourceSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *EventSourceStatus) Unmarshal(dAtA []byte) error { +func (m *HDFSEventSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14665,15 +24197,15 @@ func (m *EventSourceStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EventSourceStatus: wiretype end group for non-group") + return fmt.Errorf("proto: HDFSEventSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EventSourceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HDFSEventSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field WatchPathConfig", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14700,63 +24232,13 @@ func (m *EventSourceStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.WatchPathConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FileEventSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FileEventSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FileEventSource: illegal tag %d 
(wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EventType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14784,13 +24266,13 @@ func (m *FileEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.EventType = string(dAtA[iNdEx:postIndex]) + m.Type = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WatchPathConfig", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CheckInterval", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14800,30 +24282,29 @@ func (m *FileEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.WatchPathConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.CheckInterval = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Polling", wireType) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14833,17 +24314,29 @@ func (m *FileEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.Polling = bool(v != 0) - case 4: + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addresses = append(m.Addresses, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HDFSUser", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14853,172 +24346,99 @@ func (m *FileEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Metadata == nil { - m.Metadata = make(map[string]string) + m.HDFSUser = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KrbCCacheSecret", wireType) } - var mapkey string - var mapvalue string - for iNdEx < postIndex 
{ - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break } } - m.Metadata[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + if msglen < 0 { + return ErrInvalidLengthGenerated } - if (skippy < 0) || (iNdEx+skippy) < 0 { + postIndex := iNdEx + msglen + if postIndex < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GenericEventSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + if m.KrbCCacheSecret == nil { + m.KrbCCacheSecret = &v1.SecretKeySelector{} } - if iNdEx >= l { + if err := m.KrbCCacheSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KrbKeytabSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + if m.KrbKeytabSecret == nil { + m.KrbKeytabSecret = &v1.SecretKeySelector{} } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GenericEventSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GenericEventSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + if err := m.KrbKeytabSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field KrbUsername", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -15046,11 +24466,11 @@ func (m *GenericEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.URL = string(dAtA[iNdEx:postIndex]) + m.KrbUsername = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field KrbRealm", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -15078,13 +24498,13 @@ func (m *GenericEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Config = string(dAtA[iNdEx:postIndex]) + m.KrbRealm = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Insecure", wireType) + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KrbConfigConfigMap", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15094,17 +24514,33 @@ func (m *GenericEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.Insecure = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field JSONBody", wireType) + if msglen < 0 { + return ErrInvalidLengthGenerated } - var v int + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.KrbConfigConfigMap == nil { + m.KrbConfigConfigMap = &v1.ConfigMapKeySelector{} + } + if err := m.KrbConfigConfigMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KrbServicePrincipalName", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15114,13 +24550,25 @@ func (m *GenericEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.JSONBody = bool(v != 0) - case 5: + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KrbServicePrincipalName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } @@ -15247,9 +24695,9 @@ func (m *GenericEventSource) Unmarshal(dAtA []byte) error { } m.Metadata[mapkey] = mapvalue iNdEx = postIndex - case 6: + case 13: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthSecret", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -15276,10 +24724,10 @@ func (m *GenericEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.AuthSecret == nil { - m.AuthSecret = &v1.SecretKeySelector{} + if m.Filter == nil { + m.Filter = &EventSourceFilter{} } - if err := m.AuthSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -15304,7 +24752,7 @@ func (m *GenericEventSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *GithubEventSource) Unmarshal(dAtA []byte) error { +func (m *KafkaConsumerGroup) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15327,17 +24775,17 @@ func (m *GithubEventSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GithubEventSource: wiretype end group for non-group") + return fmt.Errorf("proto: KafkaConsumerGroup: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GithubEventSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: KafkaConsumerGroup: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupName", wireType) } - m.ID = 0 + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15347,16 +24795,49 @@ func (m *GithubEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ID |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GroupName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Oldest", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Oldest = bool(v != 0) + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Webhook", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RebalanceStrategy", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15366,31 +24847,77 @@ func (m *GithubEventSource) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Webhook == nil { - m.Webhook = &WebhookContext{} + m.RebalanceStrategy = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KafkaEventSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF } - if err := m.Webhook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - iNdEx = postIndex - case 3: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KafkaEventSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KafkaEventSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedOwner", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -15418,11 +24945,11 @@ func (m *GithubEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DeprecatedOwner = string(dAtA[iNdEx:postIndex]) + m.URL = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedRepository", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -15450,11 +24977,11 @@ func (m *GithubEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DeprecatedRepository = string(dAtA[iNdEx:postIndex]) + m.Partition = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Topic", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -15482,11 +25009,11 @@ func (m *GithubEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Events = append(m.Events, string(dAtA[iNdEx:postIndex])) + m.Topic = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIToken", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionBackoff", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -15513,16 +25040,16 @@ func (m *GithubEventSource) Unmarshal(dAtA []byte) 
error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.APIToken == nil { - m.APIToken = &v1.SecretKeySelector{} + if m.ConnectionBackoff == nil { + m.ConnectionBackoff = &common.Backoff{} } - if err := m.APIToken.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ConnectionBackoff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 7: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WebhookSecret", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -15549,16 +25076,16 @@ func (m *GithubEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.WebhookSecret == nil { - m.WebhookSecret = &v1.SecretKeySelector{} + if m.TLS == nil { + m.TLS = &common.TLSConfig{} } - if err := m.WebhookSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 8: + case 6: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Insecure", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field JSONBody", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -15575,12 +25102,12 @@ func (m *GithubEventSource) Unmarshal(dAtA []byte) error { break } } - m.Insecure = bool(v != 0) - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) + m.JSONBody = bool(v != 0) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15590,17 +25117,124 @@ func (m *GithubEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.Active = bool(v != 0) - case 10: + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Metadata[mapkey] = mapvalue + iNdEx = postIndex + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContentType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConsumerGroup", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15610,29 +25244,33 @@ func (m *GithubEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.ContentType = string(dAtA[iNdEx:postIndex]) + if m.ConsumerGroup == nil { + m.ConsumerGroup = &KafkaConsumerGroup{} + } + if err := m.ConsumerGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GithubBaseURL", wireType) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LimitEventsPerSecond", wireType) } - var stringLen uint64 + m.LimitEventsPerSecond = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15642,27 +25280,14 @@ func (m *GithubEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.LimitEventsPerSecond |= int64(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.GithubBaseURL = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 12: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GithubUploadURL", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -15690,13 +25315,13 @@ func (m *GithubEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.GithubUploadURL = string(dAtA[iNdEx:postIndex]) + m.Version = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DeleteHookOnFinish", wireType) + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType 
= %d for field SASL", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15706,15 +25331,31 @@ func (m *GithubEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - m.DeleteHookOnFinish = bool(v != 0) - case 14: + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SASL == nil { + m.SASL = &common.SASLConfig{} + } + if err := m.SASL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -15741,109 +25382,18 @@ func (m *GithubEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Metadata == nil { - m.Metadata = make(map[string]string) + if m.Filter == nil { + m.Filter = &EventSourceFilter{} } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.Metadata[mapkey] = mapvalue iNdEx = postIndex - case 15: + case 13: if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field Repositories", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15853,25 +25403,23 @@ func (m *GithubEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Repositories = append(m.Repositories, OwnedRepositories{}) - if err := m.Repositories[len(m.Repositories)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Config = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -15894,7 +25442,7 @@ func (m *GithubEventSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *GitlabEventSource) Unmarshal(dAtA []byte) error { +func (m *MQTTEventSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -15917,17 +25465,17 @@ func (m *GitlabEventSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GitlabEventSource: wiretype end group for non-group") + return fmt.Errorf("proto: MQTTEventSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GitlabEventSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MQTTEventSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Webhook", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -15937,31 +25485,27 @@ func (m *GitlabEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Webhook == nil { - m.Webhook = &WebhookContext{} - } - if err := m.Webhook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.URL = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProjectID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Topic", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -15989,11 +25533,11 @@ func (m *GitlabEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ProjectID = string(dAtA[iNdEx:postIndex]) + m.Topic = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ClientID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -16021,11 
+25565,11 @@ func (m *GitlabEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Events = append(m.Events, string(dAtA[iNdEx:postIndex])) + m.ClientID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AccessToken", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionBackoff", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -16052,16 +25596,16 @@ func (m *GitlabEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.AccessToken == nil { - m.AccessToken = &v1.SecretKeySelector{} + if m.ConnectionBackoff == nil { + m.ConnectionBackoff = &common.Backoff{} } - if err := m.AccessToken.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ConnectionBackoff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 5: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EnableSSLVerification", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field JSONBody", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -16078,12 +25622,12 @@ func (m *GitlabEventSource) Unmarshal(dAtA []byte) error { break } } - m.EnableSSLVerification = bool(v != 0) + m.JSONBody = bool(v != 0) case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GitlabBaseURL", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -16093,45 +25637,29 @@ func (m *GitlabEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.GitlabBaseURL = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DeleteHookOnFinish", wireType) + if m.TLS == nil { + m.TLS = &common.TLSConfig{} } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.DeleteHookOnFinish = bool(v != 0) - case 9: + iNdEx = postIndex + case 7: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } @@ -16256,7 +25784,79 @@ func (m *GitlabEventSource) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.Metadata[mapkey] = mapvalue + m.Metadata[mapkey] = mapvalue + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + 
if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filter == nil { + m.Filter = &EventSourceFilter{} + } + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Auth", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Auth == nil { + m.Auth = &common.BasicAuth{} + } + if err := m.Auth.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -16279,7 +25879,7 @@ func (m *GitlabEventSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *HDFSEventSource) Unmarshal(dAtA []byte) error { +func (m *NATSAuth) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -16302,15 +25902,15 @@ func (m *HDFSEventSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HDFSEventSource: wiretype end group for non-group") + return fmt.Errorf("proto: NATSAuth: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HDFSEventSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NATSAuth: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WatchPathConfig", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Basic", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -16337,15 +25937,18 @@ func (m *HDFSEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.WatchPathConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Basic == nil { + m.Basic = &common.BasicAuth{} + } + if err := m.Basic.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -16355,61 +25958,33 @@ func (m *HDFSEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CheckInterval", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } 
- b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + if m.Token == nil { + m.Token = &v1.SecretKeySelector{} } - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.Token.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.CheckInterval = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NKey", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -16419,59 +25994,31 @@ func (m *HDFSEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Addresses = append(m.Addresses, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HDFSUser", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + if m.NKey == nil { + m.NKey = &v1.SecretKeySelector{} } - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.NKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.HDFSUser = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KrbCCacheSecret", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Credential", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -16498,52 +26045,66 @@ func (m *HDFSEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.KrbCCacheSecret == nil { - m.KrbCCacheSecret = &v1.SecretKeySelector{} + if m.Credential == nil { + m.Credential = &v1.SecretKeySelector{} } - if err := m.KrbCCacheSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Credential.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KrbKeytabSecret", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - 
if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.KrbKeytabSecret == nil { - m.KrbKeytabSecret = &v1.SecretKeySelector{} + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NATSEventsSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if err := m.KrbKeytabSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if iNdEx >= l { + return io.ErrUnexpectedEOF } - iNdEx = postIndex - case 8: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NATSEventsSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NATSEventsSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KrbUsername", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -16571,11 +26132,11 @@ func (m *HDFSEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.KrbUsername = string(dAtA[iNdEx:postIndex]) + m.URL = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 9: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KrbRealm", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Subject", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -16603,11 +26164,11 @@ func (m *HDFSEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.KrbRealm = string(dAtA[iNdEx:postIndex]) + m.Subject = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 10: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KrbConfigConfigMap", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionBackoff", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -16634,18 +26195,38 @@ func (m *HDFSEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.KrbConfigConfigMap == nil { - m.KrbConfigConfigMap = &v1.ConfigMapKeySelector{} + if m.ConnectionBackoff == nil { + m.ConnectionBackoff = &common.Backoff{} } - if err := m.KrbConfigConfigMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ConnectionBackoff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 11: + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field JSONBody", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.JSONBody = bool(v != 0) + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KrbServicePrincipalName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; 
shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -16655,25 +26236,29 @@ func (m *HDFSEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.KrbServicePrincipalName = string(dAtA[iNdEx:postIndex]) + if m.TLS == nil { + m.TLS = &common.TLSConfig{} + } + if err := m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 12: + case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } @@ -16800,61 +26385,11 @@ func (m *HDFSEventSource) Unmarshal(dAtA []byte) error { } m.Metadata[mapkey] = mapvalue iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *KafkaConsumerGroup) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: KafkaConsumerGroup: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: KafkaConsumerGroup: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GroupName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Auth", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -16864,29 +26399,33 @@ func (m *KafkaConsumerGroup) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.GroupName = string(dAtA[iNdEx:postIndex]) + if m.Auth == nil { + m.Auth = &NATSAuth{} + } + if err := m.Auth.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Oldest", wireType) + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -16896,15 +26435,31 @@ func (m *KafkaConsumerGroup) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= int(b&0x7F) 
<< shift if b < 0x80 { break } } - m.Oldest = bool(v != 0) - case 3: + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filter == nil { + m.Filter = &EventSourceFilter{} + } + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RebalanceStrategy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Queue", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -16932,7 +26487,8 @@ func (m *KafkaConsumerGroup) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.RebalanceStrategy = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.Queue = &s iNdEx = postIndex default: iNdEx = preIndex @@ -16955,7 +26511,7 @@ func (m *KafkaConsumerGroup) Unmarshal(dAtA []byte) error { } return nil } -func (m *KafkaEventSource) Unmarshal(dAtA []byte) error { +func (m *NSQEventSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -16978,15 +26534,15 @@ func (m *KafkaEventSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: KafkaEventSource: wiretype end group for non-group") + return fmt.Errorf("proto: NSQEventSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: KafkaEventSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: NSQEventSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HostAddress", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17014,11 +26570,11 @@ func (m *KafkaEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.URL = string(dAtA[iNdEx:postIndex]) + m.HostAddress = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Topic", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17046,11 +26602,11 @@ func (m *KafkaEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Partition = string(dAtA[iNdEx:postIndex]) + m.Topic = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Topic", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Channel", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17078,7 +26634,7 @@ func (m *KafkaEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Topic = string(dAtA[iNdEx:postIndex]) + m.Channel = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { @@ -17117,6 +26673,26 @@ func (m *KafkaEventSource) Unmarshal(dAtA []byte) error { } iNdEx = postIndex case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field JSONBody", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.JSONBody = bool(v != 0) + case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) } @@ -17152,26 +26728,6 @@ func (m *KafkaEventSource) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field JSONBody", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.JSONBody = bool(v != 0) case 7: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) @@ -17301,7 +26857,7 @@ func (m *KafkaEventSource) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConsumerGroup", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -17328,18 +26884,68 @@ func (m *KafkaEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ConsumerGroup == nil { - m.ConsumerGroup = &KafkaConsumerGroup{} + if m.Filter == nil { + m.Filter = &EventSourceFilter{} } - if err := m.ConsumerGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field LimitEventsPerSecond", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - m.LimitEventsPerSecond = 0 + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OwnedRepositories) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OwnedRepositories: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OwnedRepositories: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17349,14 +26955,27 @@ func (m *KafkaEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.LimitEventsPerSecond |= int64(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - case 10: + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17384,13 +27003,63 @@ func (m *KafkaEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Version = string(dAtA[iNdEx:postIndex]) + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SASL", wireType) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var msglen int + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PayloadEnrichmentFlags) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PayloadEnrichmentFlags: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PayloadEnrichmentFlags: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FetchPROnPRCommentAdded", wireType) + } + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17400,28 +27069,12 @@ func (m *KafkaEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SASL == nil { - m.SASL = &common.SASLConfig{} - } - if err := m.SASL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + m.FetchPROnPRCommentAdded = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -17443,7 +27096,7 @@ func (m *KafkaEventSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *MQTTEventSource) Unmarshal(dAtA []byte) error { +func (m *PubSubEventSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17466,15 +27119,15 @@ func (m *MQTTEventSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MQTTEventSource: wiretype end group for non-group") + return fmt.Errorf("proto: PubSubEventSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MQTTEventSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PubSubEventSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field URL", 
wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ProjectID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17502,11 +27155,11 @@ func (m *MQTTEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.URL = string(dAtA[iNdEx:postIndex]) + m.ProjectID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Topic", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TopicProjectID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17534,11 +27187,11 @@ func (m *MQTTEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Topic = string(dAtA[iNdEx:postIndex]) + m.TopicProjectID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Topic", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -17566,11 +27219,43 @@ func (m *MQTTEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ClientID = string(dAtA[iNdEx:postIndex]) + m.Topic = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConnectionBackoff", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SubscriptionID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SubscriptionID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CredentialSecret", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -17597,16 +27282,16 @@ func (m *MQTTEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ConnectionBackoff == nil { - m.ConnectionBackoff = &common.Backoff{} + if m.CredentialSecret == nil { + m.CredentialSecret = &v1.SecretKeySelector{} } - if err := m.ConnectionBackoff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.CredentialSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: + case 6: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field JSONBody", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DeleteSubscriptionOnFinish", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -17623,12 +27308,12 @@ func (m *MQTTEventSource) Unmarshal(dAtA []byte) error { break } } - m.JSONBody = bool(v != 0) - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + m.DeleteSubscriptionOnFinish = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field JSONBody", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated 
@@ -17638,29 +27323,13 @@ func (m *MQTTEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TLS == nil { - m.TLS = &common.TLSConfig{} - } - if err := m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: + m.JSONBody = bool(v != 0) + case 8: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } @@ -17787,6 +27456,42 @@ func (m *MQTTEventSource) Unmarshal(dAtA []byte) error { } m.Metadata[mapkey] = mapvalue iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filter == nil { + m.Filter = &EventSourceFilter{} + } + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -17808,7 +27513,7 @@ func (m *MQTTEventSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *NATSAuth) Unmarshal(dAtA []byte) error { +func (m *PulsarEventSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -17821,27 +27526,59 @@ func (m *NATSAuth) Unmarshal(dAtA []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PulsarEventSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PulsarEventSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Topics", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NATSAuth: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NATSAuth: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Topics = append(m.Topics, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Basic", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17851,33 +27588,29 @@ func (m *NATSAuth) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Basic == nil { - m.Basic = &common.BasicAuth{} - } - if err := m.Basic.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Type = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -17887,31 +27620,27 @@ func (m *NATSAuth) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Token == nil { - m.Token = &v1.SecretKeySelector{} - } - if err := m.Token.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.URL = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NKey", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TLSTrustCertsSecret", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -17938,16 +27667,56 @@ func (m *NATSAuth) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.NKey == nil { - m.NKey = &v1.SecretKeySelector{} + if m.TLSTrustCertsSecret == nil { + m.TLSTrustCertsSecret = &v1.SecretKeySelector{} } - if err := m.NKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.TLSTrustCertsSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TLSAllowInsecureConnection", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TLSAllowInsecureConnection = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TLSValidateHostname", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TLSValidateHostname = bool(v != 0) + case 7: if wireType != 2 { - return fmt.Errorf("proto: 
wrong wireType = %d for field Credential", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -17974,68 +27743,18 @@ func (m *NATSAuth) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Credential == nil { - m.Credential = &v1.SecretKeySelector{} + if m.TLS == nil { + m.TLS = &common.TLSConfig{} } - if err := m.Credential.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NATSEventsSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NATSEventsSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NATSEventsSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionBackoff", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18045,29 +27764,53 @@ func (m *NATSEventsSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.URL = string(dAtA[iNdEx:postIndex]) + if m.ConnectionBackoff == nil { + m.ConnectionBackoff = &common.Backoff{} + } + if err := m.ConnectionBackoff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field JSONBody", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.JSONBody = bool(v != 0) + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subject", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18077,27 +27820,122 @@ func (m *NATSEventsSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= 
int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Subject = string(dAtA[iNdEx:postIndex]) + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Metadata[mapkey] = mapvalue iNdEx = postIndex - case 3: + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConnectionBackoff", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AuthTokenSecret", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18124,36 +27962,16 @@ func (m *NATSEventsSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ConnectionBackoff == nil { - m.ConnectionBackoff = &common.Backoff{} + if m.AuthTokenSecret == nil { + m.AuthTokenSecret = &v1.SecretKeySelector{} } - if err := m.ConnectionBackoff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.AuthTokenSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong 
wireType = %d for field JSONBody", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.JSONBody = bool(v != 0) - case 5: + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18180,16 +27998,16 @@ func (m *NATSEventsSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.TLS == nil { - m.TLS = &common.TLSConfig{} + if m.Filter == nil { + m.Filter = &EventSourceFilter{} } - if err := m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: + case 13: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AuthAthenzParams", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18216,8 +28034,8 @@ func (m *NATSEventsSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Metadata == nil { - m.Metadata = make(map[string]string) + if m.AuthAthenzParams == nil { + m.AuthAthenzParams = make(map[string]string) } var mapkey string var mapvalue string @@ -18312,11 +28130,11 @@ func (m *NATSEventsSource) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.Metadata[mapkey] = mapvalue + m.AuthAthenzParams[mapkey] = mapvalue iNdEx = postIndex - case 7: + case 14: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Auth", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AuthAthenzSecret", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -18343,10 +28161,10 @@ func (m *NATSEventsSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Auth == nil { - m.Auth = &NATSAuth{} + if m.AuthAthenzSecret == nil { + m.AuthAthenzSecret = &v1.SecretKeySelector{} } - if err := m.Auth.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.AuthAthenzSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -18371,7 +28189,7 @@ func (m *NATSEventsSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *NSQEventSource) Unmarshal(dAtA []byte) error { +func (m *RedisEventSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18394,10 +28212,10 @@ func (m *NSQEventSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: NSQEventSource: wiretype end group for non-group") + return fmt.Errorf("proto: RedisEventSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: NSQEventSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RedisEventSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -18434,9 +28252,9 @@ func (m *NSQEventSource) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Topic", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) } - var stringLen uint64 + var 
msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18446,27 +28264,31 @@ func (m *NSQEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Topic = string(dAtA[iNdEx:postIndex]) + if m.Password == nil { + m.Password = &v1.SecretKeySelector{} + } + if err := m.Password.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Channel", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18494,13 +28316,13 @@ func (m *NSQEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Channel = string(dAtA[iNdEx:postIndex]) + m.Namespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConnectionBackoff", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DB", wireType) } - var msglen int + m.DB = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18510,33 +28332,16 @@ func (m *NSQEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.DB |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ConnectionBackoff == nil { - m.ConnectionBackoff = &common.Backoff{} - } - if err := m.ConnectionBackoff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field JSONBody", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Channels", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18546,12 +28351,24 @@ func (m *NSQEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.JSONBody = bool(v != 0) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Channels = append(m.Channels, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex case 6: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) @@ -18715,61 +28532,11 @@ func (m *NSQEventSource) Unmarshal(dAtA []byte) error { } m.Metadata[mapkey] = mapvalue iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return 
io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *OwnedRepositories) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: OwnedRepositories: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: OwnedRepositories: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18779,27 +28546,51 @@ func (m *OwnedRepositories) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Owner = string(dAtA[iNdEx:postIndex]) + if m.Filter == nil { + m.Filter = &EventSourceFilter{} + } + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field JSONBody", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.JSONBody = bool(v != 0) + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18827,7 +28618,7 @@ func (m *OwnedRepositories) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + m.Username = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -18850,7 +28641,7 @@ func (m *OwnedRepositories) Unmarshal(dAtA []byte) error { } return nil } -func (m *PubSubEventSource) Unmarshal(dAtA []byte) error { +func (m *RedisStreamEventSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -18873,15 +28664,15 @@ func (m *PubSubEventSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PubSubEventSource: wiretype end group for non-group") + return fmt.Errorf("proto: RedisStreamEventSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PubSubEventSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RedisStreamEventSource: illegal tag %d (wire type %d)", fieldNum, 
wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProjectID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HostAddress", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -18909,13 +28700,13 @@ func (m *PubSubEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ProjectID = string(dAtA[iNdEx:postIndex]) + m.HostAddress = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TopicProjectID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18925,29 +28716,33 @@ func (m *PubSubEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.TopicProjectID = string(dAtA[iNdEx:postIndex]) + if m.Password == nil { + m.Password = &v1.SecretKeySelector{} + } + if err := m.Password.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Topic", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DB", wireType) } - var stringLen uint64 + m.DB = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -18957,27 +28752,14 @@ func (m *PubSubEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.DB |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Topic = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SubscriptionID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Streams", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19005,13 +28787,13 @@ func (m *PubSubEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SubscriptionID = string(dAtA[iNdEx:postIndex]) + m.Streams = append(m.Streams, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CredentialSecret", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxMsgCountPerRead", wireType) } - var msglen int + m.MaxMsgCountPerRead = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19021,33 +28803,16 @@ func (m *PubSubEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.MaxMsgCountPerRead |= int32(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + 
msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CredentialSecret == nil { - m.CredentialSecret = &v1.SecretKeySelector{} - } - if err := m.CredentialSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DeleteSubscriptionOnFinish", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsumerGroup", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19057,37 +28822,29 @@ func (m *PubSubEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.DeleteSubscriptionOnFinish = bool(v != 0) - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field JSONBody", wireType) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - m.JSONBody = bool(v != 0) - case 8: + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConsumerGroup = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedCredentialsFile", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19097,25 +28854,29 @@ func (m *PubSubEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.DeprecatedCredentialsFile = string(dAtA[iNdEx:postIndex]) + if m.TLS == nil { + m.TLS = &common.TLSConfig{} + } + if err := m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 9: + case 8: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } @@ -19242,61 +29003,11 @@ func (m *PubSubEventSource) Unmarshal(dAtA []byte) error { } m.Metadata[mapkey] = mapvalue iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PulsarEventSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PulsarEventSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PulsarEventSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Topics", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19306,27 +29017,31 @@ func (m *PulsarEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Topics = append(m.Topics, string(dAtA[iNdEx:postIndex])) + if m.Filter == nil { + m.Filter = &EventSourceFilter{} + } + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19351,14 +29066,64 @@ func (m *PulsarEventSource) Unmarshal(dAtA []byte) error { if postIndex < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceEventSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceEventSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceEventSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19386,11 +29151,11 @@ func (m *PulsarEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.URL = string(dAtA[iNdEx:postIndex]) + m.Namespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex 
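	// Every decoder in this generated file follows the same protobuf wire-format
	// recipe: read a base-128 varint key one byte at a time (7 payload bits per
	// byte, with the high bit set on every byte but the last), split it into
	// fieldNum = key >> 3 and wireType = key & 0x7, then for wireType 2
	// (length-delimited fields: strings, bytes, nested messages) read a varint
	// length and check postIndex = iNdEx + length against len(dAtA) before slicing.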
- case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TLSTrustCertsSecret", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -19417,56 +29182,16 @@ func (m *PulsarEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.TLSTrustCertsSecret == nil { - m.TLSTrustCertsSecret = &v1.SecretKeySelector{} + if m.Filter == nil { + m.Filter = &ResourceFilter{} } - if err := m.TLSTrustCertsSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TLSAllowInsecureConnection", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.TLSAllowInsecureConnection = bool(v != 0) - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TLSValidateHostname", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.TLSValidateHostname = bool(v != 0) - case 7: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field GroupVersionResource", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -19493,18 +29218,15 @@ func (m *PulsarEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.TLS == nil { - m.TLS = &common.TLSConfig{} - } - if err := m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.GroupVersionResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 8: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConnectionBackoff", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EventTypes", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19514,49 +29236,25 @@ func (m *PulsarEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ConnectionBackoff == nil { - m.ConnectionBackoff = &common.Backoff{} - } - if err := m.ConnectionBackoff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.EventTypes = append(m.EventTypes, ResourceEventType(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field JSONBody", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.JSONBody = bool(v != 0) - case 10: + case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } @@ -19683,59 +29381,9 @@ func (m *PulsarEventSource) Unmarshal(dAtA []byte) error { } m.Metadata[mapkey] = mapvalue iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RedisEventSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RedisEventSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RedisEventSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostAddress", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19763,47 +29411,61 @@ func (m *RedisEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.HostAddress = string(dAtA[iNdEx:postIndex]) + m.Cluster = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.Password == nil { - m.Password = &v1.SecretKeySelector{} + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceFilter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if err := m.Password.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - iNdEx = postIndex - case 3: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceFilter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceFilter: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -19831,32 +29493,13 @@ func (m *RedisEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Namespace = string(dAtA[iNdEx:postIndex]) + m.Prefix = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DB", wireType) - } - m.DB = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DB |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Channels", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -19866,27 +29509,29 @@ func (m *RedisEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Channels = append(m.Channels, string(dAtA[iNdEx:postIndex])) + m.Labels = append(m.Labels, Selector{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 6: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -19913,16 +29558,14 @@ func (m *RedisEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.TLS == nil { - m.TLS = &common.TLSConfig{} - } - if err := m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Fields = append(m.Fields, Selector{}) + if err := m.Fields[len(m.Fields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 7: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CreatedBy", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -19949,104 +29592,30 @@ func (m *RedisEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Metadata == nil { - m.Metadata = make(map[string]string) + if err := m.CreatedBy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d 
for field AfterStart", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break } } - m.Metadata[mapkey] = mapvalue - iNdEx = postIndex + m.AfterStart = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -20068,7 +29637,7 @@ func (m *RedisEventSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResourceEventSource) Unmarshal(dAtA []byte) error { +func (m *SFTPEventSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -20091,15 +29660,15 @@ func (m *ResourceEventSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ResourceEventSource: wiretype end group for non-group") + return fmt.Errorf("proto: SFTPEventSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceEventSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SFTPEventSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EventType", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -20120,18 +29689,87 @@ func (m *ResourceEventSource) Unmarshal(dAtA []byte) error { if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } 
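+	// postIndex was bounds-checked above, so this slice cannot run past dAtA;
+	// copy the EventType bytes into a string and advance the read cursor.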
+ m.EventType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WatchPathConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.WatchPathConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Namespace = string(dAtA[iNdEx:postIndex]) + if m.Username == nil { + m.Username = &v1.SecretKeySelector{} + } + if err := m.Username.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20158,16 +29796,16 @@ func (m *ResourceEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Filter == nil { - m.Filter = &ResourceFilter{} + if m.Password == nil { + m.Password = &v1.SecretKeySelector{} } - if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Password.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GroupVersionResource", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SSHKeySecret", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20194,15 +29832,18 @@ func (m *ResourceEventSource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.GroupVersionResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.SSHKeySecret == nil { + m.SSHKeySecret = &v1.SecretKeySelector{} + } + if err := m.SSHKeySecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EventTypes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20212,25 +29853,29 @@ func (m *ResourceEventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + 
intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.EventTypes = append(m.EventTypes, ResourceEventType(dAtA[iNdEx:postIndex])) + if m.Address == nil { + m.Address = &v1.SecretKeySelector{} + } + if err := m.Address.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: + case 7: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } @@ -20357,91 +30002,9 @@ func (m *ResourceEventSource) Unmarshal(dAtA []byte) error { } m.Metadata[mapkey] = mapvalue iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceFilter) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceFilter: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceFilter: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Prefix = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -20468,50 +30031,18 @@ func (m *ResourceFilter) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Labels = append(m.Labels, Selector{}) - if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + if m.Filter == nil { + m.Filter = &EventSourceFilter{} } - m.Fields = 
append(m.Fields, Selector{}) - if err := m.Fields[len(m.Fields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedBy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PollIntervalDuration", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -20521,45 +30052,24 @@ func (m *ResourceFilter) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.CreatedBy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.PollIntervalDuration = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AfterStart", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AfterStart = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -20960,7 +30470,75 @@ func (m *SNSEventSource) Unmarshal(dAtA []byte) error { break } } - m.ValidateSignature = bool(v != 0) + m.ValidateSignature = bool(v != 0) + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filter == nil { + m.Filter = &EventSourceFilter{} + } + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Endpoint", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Endpoint = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -21377,6 +30955,130 @@ func (m *SQSEventSource) Unmarshal(dAtA []byte) error { } m.Metadata[mapkey] = mapvalue iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DLQ", 
wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DLQ = bool(v != 0) + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filter == nil { + m.Filter = &EventSourceFilter{} + } + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Endpoint", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Endpoint = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionToken", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SessionToken == nil { + m.SessionToken = &v1.SecretKeySelector{} + } + if err := m.SessionToken.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -21924,6 +31626,42 @@ func (m *SlackEventSource) Unmarshal(dAtA []byte) error { } m.Metadata[mapkey] = mapvalue iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filter == nil { + m.Filter = &EventSourceFilter{} + } + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -23851,10 +33589,80 @@ func (m *WebhookContext) Unmarshal(dAtA 
[]byte) error { } iNdEx = postIndex case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPayloadSize", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MaxPayloadSize = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WebhookEventSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WebhookEventSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WebhookEventSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedServerCertPath", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field WebhookContext", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23864,29 +33672,30 @@ func (m *WebhookContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.DeprecatedServerCertPath = string(dAtA[iNdEx:postIndex]) + if err := m.WebhookContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 10: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedServerKeyPath", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -23896,23 +33705,27 @@ func (m *WebhookContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.DeprecatedServerKeyPath = string(dAtA[iNdEx:postIndex]) + if m.Filter == nil { + m.Filter = &EventSourceFilter{} + } + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex diff --git 
a/pkg/apis/eventsource/v1alpha1/generated.proto b/pkg/apis/eventsource/v1alpha1/generated.proto index 5e718f695c..7e3afcdccd 100644 --- a/pkg/apis/eventsource/v1alpha1/generated.proto +++ b/pkg/apis/eventsource/v1alpha1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,7 +16,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package github.com.argoproj.argo_events.pkg.apis.eventsource.v1alpha1; @@ -27,7 +27,7 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1alpha1"; +option go_package = "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1"; // AMQPConsumeConfig holds the configuration to immediately starts delivering queued messages // +k8s:openapi-gen=true @@ -87,31 +87,38 @@ message AMQPEventSource { map metadata = 8; // ExchangeDeclare holds the configuration for the exchange on the server - // For more information, visit https://godoc.org/github.com/streadway/amqp#Channel.ExchangeDeclare + // For more information, visit https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.ExchangeDeclare // +optional optional AMQPExchangeDeclareConfig exchangeDeclare = 9; // QueueDeclare holds the configuration of a queue to hold messages and deliver to consumers. // Declaring creates a queue if it doesn't already exist, or ensures that an existing queue matches // the same parameters - // For more information, visit https://godoc.org/github.com/streadway/amqp#Channel.QueueDeclare + // For more information, visit https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.QueueDeclare // +optional optional AMQPQueueDeclareConfig queueDeclare = 10; // QueueBind holds the configuration that binds an exchange to a queue so that publishings to the // exchange will be routed to the queue when the publishing routing key matches the binding routing key - // For more information, visit https://godoc.org/github.com/streadway/amqp#Channel.QueueBind + // For more information, visit https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.QueueBind // +optional optional AMQPQueueBindConfig queueBind = 11; // Consume holds the configuration to immediately starts delivering queued messages - // For more information, visit https://godoc.org/github.com/streadway/amqp#Channel.Consume + // For more information, visit https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.Consume // +optional optional AMQPConsumeConfig consume = 12; // Auth hosts secret selectors for username and password // +optional optional github.com.argoproj.argo_events.pkg.apis.common.BasicAuth auth = 13; + + // URLSecret is secret reference for rabbitmq service URL + optional k8s.io.api.core.v1.SecretKeySelector urlSecret = 14; + + // Filter + // +optional + optional EventSourceFilter filter = 15; } // AMQPExchangeDeclareConfig holds the configuration for the exchange on the server @@ -168,6 +175,10 @@ message AMQPQueueDeclareConfig { // NowWait when true, the queue assumes to be declared on the server // +optional optional bool noWait = 5; + + // Arguments of a queue (also known as "x-arguments") used for optional features and plugins + // +optional + optional string arguments = 6; } // 
AzureEventsHubEventSource describes the event source for azure events hub @@ -189,34 +200,268 @@ message AzureEventsHubEventSource { // Metadata holds the user defined metadata which will passed along the event payload. // +optional map metadata = 5; + + // Filter + // +optional + optional EventSourceFilter filter = 6; +} + +// AzureQueueStorageEventSource describes the event source for azure queue storage +// more info at https://learn.microsoft.com/en-us/azure/storage/queues/ +message AzureQueueStorageEventSource { + // StorageAccountName is the name of the storage account where the queue is. This field is necessary to + // access via Azure AD (managed identity) and it is ignored if ConnectionString is set. + // +optional + optional string storageAccountName = 1; + + // ConnectionString is the connection string to access Azure Queue Storage. If this fields is not provided + // it will try to access via Azure AD with StorageAccountName. + // +optional + optional k8s.io.api.core.v1.SecretKeySelector connectionString = 2; + + // QueueName is the name of the queue + optional string queueName = 3; + + // JSONBody specifies that all event body payload coming from this + // source will be JSON + // +optional + optional bool jsonBody = 4; + + // DLQ specifies if a dead-letter queue is configured for messages that can't be processed successfully. + // If set to true, messages with invalid payload won't be acknowledged to allow to forward them farther to the dead-letter queue. + // The default value is false. + // +optional + optional bool dlq = 5; + + // Metadata holds the user defined metadata which will passed along the event payload. + // +optional + map metadata = 6; + + // Filter + // +optional + optional EventSourceFilter filter = 7; + + // DecodeMessage specifies if all the messages should be base64 decoded. + // If set to true the decoding is done before the evaluation of JSONBody + // +optional + optional bool decodeMessage = 8; + + // WaitTimeInSeconds is the duration (in seconds) for which the event source waits between empty results from the queue. + // The default value is 3 seconds. + // +optional + optional int32 waitTimeInSeconds = 9; +} + +// AzureServiceBusEventSource describes the event source for azure service bus +// More info at https://docs.microsoft.com/en-us/azure/service-bus-messaging/ +message AzureServiceBusEventSource { + // ConnectionString is the connection string for the Azure Service Bus. If this fields is not provided + // it will try to access via Azure AD with DefaultAzureCredential and FullyQualifiedNamespace. + // +optional + optional k8s.io.api.core.v1.SecretKeySelector connectionString = 1; + + // QueueName is the name of the Azure Service Bus Queue + optional string queueName = 2; + + // TopicName is the name of the Azure Service Bus Topic + optional string topicName = 3; + + // SubscriptionName is the name of the Azure Service Bus Topic Subscription + optional string subscriptionName = 4; + + // TLS configuration for the service bus client + // +optional + optional github.com.argoproj.argo_events.pkg.apis.common.TLSConfig tls = 5; + + // JSONBody specifies that all event body payload coming from this + // source will be JSON + // +optional + optional bool jsonBody = 6; + + // Metadata holds the user defined metadata which will passed along the event payload. 
+ // +optional + map metadata = 7; + + // Filter + // +optional + optional EventSourceFilter filter = 8; + + // FullyQualifiedNamespace is the Service Bus namespace name (ex: myservicebus.servicebus.windows.net). This field is necessary to + // access via Azure AD (managed identity) and it is ignored if ConnectionString is set. + // +optional + optional string fullyQualifiedNamespace = 9; +} + +// BitbucketAuth holds the different auth strategies for connecting to Bitbucket +message BitbucketAuth { + // Basic is BasicAuth auth strategy. + // +optional + optional BitbucketBasicAuth basic = 1; + + // OAuthToken refers to the K8s secret that holds the OAuth Bearer token. + // +optional + optional k8s.io.api.core.v1.SecretKeySelector oauthToken = 2; +} + +// BasicAuth holds the information required to authenticate user via basic auth mechanism +message BitbucketBasicAuth { + // Username refers to the K8s secret that holds the username. + optional k8s.io.api.core.v1.SecretKeySelector username = 1; + + // Password refers to the K8s secret that holds the password. + optional k8s.io.api.core.v1.SecretKeySelector password = 2; +} + +// BitbucketEventSource describes the event source for Bitbucket +message BitbucketEventSource { + // DeleteHookOnFinish determines whether to delete the defined Bitbucket hook once the event source is stopped. + // +optional + optional bool deleteHookOnFinish = 1; + + // Metadata holds the user defined metadata which will be passed along the event payload. + // +optional + map metadata = 2; + + // Webhook refers to the configuration required to run an http server + optional WebhookContext webhook = 3; + + // Auth information required to connect to Bitbucket. + optional BitbucketAuth auth = 4; + + // Events this webhook is subscribed to. + repeated string events = 5; + + // DeprecatedOwner is the owner of the repository. + // Deprecated: use Repositories instead. Will be unsupported in v1.9 + // +optional + optional string owner = 6; + + // DeprecatedProjectKey is the key of the project to which the repository relates + // Deprecated: use Repositories instead. Will be unsupported in v1.9 + // +optional + optional string projectKey = 7; + + // DeprecatedRepositorySlug is a URL-friendly version of a repository name, automatically generated by Bitbucket for use in the URL + // Deprecated: use Repositories instead. Will be unsupported in v1.9 + // +optional + optional string repositorySlug = 8; + + // Repositories holds a list of repositories for which integration needs to set up + // +optional + repeated BitbucketRepository repositories = 9; + + // Filter + // +optional + optional EventSourceFilter filter = 10; +} + +message BitbucketRepository { + // Owner is the owner of the repository + optional string owner = 1; + + // RepositorySlug is a URL-friendly version of a repository name, automatically generated by Bitbucket for use in the URL + optional string repositorySlug = 2; +} + +// BitbucketServerEventSource refers to event-source related to Bitbucket Server events +message BitbucketServerEventSource { + // Webhook holds configuration to run a http server. + optional WebhookContext webhook = 1; + + // DeprecatedProjectKey is the key of project for which integration needs to set up. + // Deprecated: use Repositories instead. Will be unsupported in v1.8. + // +optional + optional string projectKey = 2; + + // DeprecatedRepositorySlug is the slug of the repository for which integration needs to set up. + // Deprecated: use Repositories instead. 
Will be unsupported in v1.8. + // +optional + optional string repositorySlug = 3; + + // Projects holds a list of projects for which integration needs to set up, this will add the webhook to all repositories in the project. + // +optional + repeated string projects = 4; + + // Repositories holds a list of repositories for which integration needs to set up. + // +optional + repeated BitbucketServerRepository repositories = 5; + + // Events are bitbucket event to listen to. + // Refer https://confluence.atlassian.com/bitbucketserver/event-payload-938025882.html + // +optional + repeated string events = 6; + + // SkipBranchRefsChangedOnOpenPR bypasses the event repo:refs_changed for branches whenever there's an associated open pull request. + // This helps in optimizing the event handling process by avoiding unnecessary triggers for branch reference changes that are already part of a pull request under review. + // +optional + optional bool skipBranchRefsChangedOnOpenPR = 7; + + // AccessToken is reference to K8s secret which holds the bitbucket api access information. + optional k8s.io.api.core.v1.SecretKeySelector accessToken = 8; + + // WebhookSecret is reference to K8s secret which holds the bitbucket webhook secret (for HMAC validation). + optional k8s.io.api.core.v1.SecretKeySelector webhookSecret = 9; + + // BitbucketServerBaseURL is the base URL for API requests to a custom endpoint. + optional string bitbucketserverBaseURL = 10; + + // DeleteHookOnFinish determines whether to delete the Bitbucket Server hook for the project once the event source is stopped. + // +optional + optional bool deleteHookOnFinish = 11; + + // Metadata holds the user defined metadata which will passed along the event payload. + // +optional + map metadata = 12; + + // Filter + // +optional + optional EventSourceFilter filter = 13; + + // TLS configuration for the bitbucketserver client. + // +optional + optional github.com.argoproj.argo_events.pkg.apis.common.TLSConfig tls = 14; + + // CheckInterval is a duration in which to wait before checking that the webhooks exist, e.g. 1s, 30m, 2h... (defaults to 1m) + // +optional + optional string checkInterval = 15; +} + +message BitbucketServerRepository { + // ProjectKey is the key of project for which integration needs to set up. + optional string projectKey = 1; + + // RepositorySlug is the slug of the repository for which integration needs to set up. + optional string repositorySlug = 2; } // CalendarEventSource describes a time based dependency. One of the fields (schedule, interval, or recurrence) must be passed. // Schedule takes precedence over interval; interval takes precedence over recurrence message CalendarEventSource { // Schedule is a cron-like expression. For reference, see: https://en.wikipedia.org/wiki/Cron + // +optional optional string schedule = 1; // Interval is a string that describes an interval duration, e.g. 1s, 30m, 2h... + // +optional optional string interval = 2; + // ExclusionDates defines the list of DATE-TIME exceptions for recurring events. repeated string exclusionDates = 3; // Timezone in which to run the schedule // +optional optional string timezone = 4; - // UserPayload will be sent to sensor as extra data once the event is triggered - // +optional - // Deprecated: will be removed in v1.5. Please use Metadata instead. - optional bytes userPayload = 5; - // Metadata holds the user defined metadata which will passed along the event payload. 
// +optional - map metadata = 6; + map metadata = 5; // Persistence hold the configuration for event persistence - optional EventPersistence persistence = 7; + optional EventPersistence persistence = 6; + + // Filter + // +optional + optional EventSourceFilter filter = 8; } message CatchupConfiguration { @@ -271,6 +516,10 @@ message EmitterEventSource { // Metadata holds the user defined metadata which will passed along the event payload. // +optional map metadata = 9; + + // Filter + // +optional + optional EventSourceFilter filter = 10; } message EventPersistence { @@ -296,6 +545,10 @@ message EventSource { optional EventSourceStatus status = 3; } +message EventSourceFilter { + optional string expression = 1; +} + // EventSourceList is the list of eventsource resources // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object message EventSourceList { @@ -317,84 +570,101 @@ message EventSourceSpec { // +optional optional Service service = 3; - // DeprecatedReplica is the event source deployment replicas - // Deprecated: use replicas instead, will be removed in v1.5 - optional int32 replica = 4; - // Minio event sources - map minio = 5; + map minio = 4; // Calendar event sources - map calendar = 6; + map calendar = 5; // File event sources - map file = 7; + map file = 6; // Resource event sources - map resource = 8; + map resource = 7; // Webhook event sources - map webhook = 9; + map webhook = 8; // AMQP event sources - map amqp = 10; + map amqp = 9; // Kafka event sources - map kafka = 11; + map kafka = 10; // MQTT event sources - map mqtt = 12; + map mqtt = 11; // NATS event sources - map nats = 13; + map nats = 12; // SNS event sources - map sns = 14; + map sns = 13; // SQS event sources - map sqs = 15; + map sqs = 14; // PubSub event sources - map pubSub = 16; + map pubSub = 15; // Github event sources - map github = 17; + map github = 16; // Gitlab event sources - map gitlab = 18; + map gitlab = 17; // HDFS event sources - map hdfs = 19; + map hdfs = 18; // Slack event sources - map slack = 20; + map slack = 19; // StorageGrid event sources - map storageGrid = 21; + map storageGrid = 20; // AzureEventsHub event sources - map azureEventsHub = 22; + map azureEventsHub = 21; // Stripe event sources - map stripe = 23; + map stripe = 22; // Emitter event source - map emitter = 24; + map emitter = 23; // Redis event source - map redis = 25; + map redis = 24; // NSQ event source - map nsq = 26; + map nsq = 25; // Pulsar event source - map pulsar = 27; + map pulsar = 26; // Generic event source - map generic = 28; + map generic = 27; // Replicas is the event source deployment replicas - optional int32 replicas = 29; + optional int32 replicas = 28; + + // Bitbucket Server event sources + map bitbucketserver = 29; + + // Bitbucket event sources + map bitbucket = 30; + + // Redis stream source + map redisStream = 31; + + // Azure Service Bus event source + map azureServiceBus = 32; + + // AzureQueueStorage event source + map azureQueueStorage = 33; + + // SFTP event sources + map sftp = 34; + + // Gerrit event source + map gerrit = 35; } // EventSourceStatus holds the status of the event-source resource @@ -417,6 +687,10 @@ message FileEventSource { // Metadata holds the user defined metadata which will passed along the event payload. // +optional map metadata = 4; + + // Filter + // +optional + optional EventSourceFilter filter = 5; } // GenericEventSource refers to a generic event source. It can be used to implement a custom event source. 
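The new EventSourceFilter message above carries a single expression string, and the diff wires an optional filter of this type into nearly every event source. A minimal sketch of how such an expression could be evaluated against a JSON event body, assuming the expr-lang/expr evaluator and the helper name filterEvent (neither is named by this diff):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/expr-lang/expr" // assumed evaluator; this diff does not name one
)

// filterEvent reports whether the filter expression evaluates to true
// against the decoded JSON event body.
func filterEvent(expression string, body []byte) (bool, error) {
	env := map[string]interface{}{}
	if err := json.Unmarshal(body, &env); err != nil {
		return false, fmt.Errorf("filter: event body is not JSON: %w", err)
	}
	out, err := expr.Eval(expression, env)
	if err != nil {
		return false, err
	}
	pass, ok := out.(bool)
	if !ok {
		return false, fmt.Errorf("filter: expression %q did not evaluate to a boolean", expression)
	}
	return pass, nil
}

func main() {
	ok, err := filterEvent(`action == "opened"`, []byte(`{"action":"opened"}`))
	fmt.Println(ok, err) // true <nil>
}
```

Under this reading, an event whose body fails the expression would simply be dropped before dispatch.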
@@ -442,12 +716,67 @@ message GenericEventSource { // AuthSecret holds a secret selector that contains a bearer token for authentication // +optional optional k8s.io.api.core.v1.SecretKeySelector authSecret = 6; + + // Filter + // +optional + optional EventSourceFilter filter = 7; +} + +// GerritEventSource refers to event-source related to gerrit events +message GerritEventSource { + // Webhook holds configuration to run a http server + optional WebhookContext webhook = 1; + + // HookName is the name of the webhook + optional string hookName = 2; + + // Events are gerrit event to listen to. + // Refer https://gerrit-review.googlesource.com/Documentation/cmd-stream-events.html#events + repeated string events = 3; + + // Auth hosts secret selectors for username and password + // +optional + optional github.com.argoproj.argo_events.pkg.apis.common.BasicAuth auth = 4; + + // GerritBaseURL is the base URL for API requests to a custom endpoint + optional string gerritBaseURL = 5; + + // DeleteHookOnFinish determines whether to delete the Gerrit hook for the project once the event source is stopped. + // +optional + optional bool deleteHookOnFinish = 6; + + // Metadata holds the user defined metadata which will passed along the event payload. + // +optional + map metadata = 7; + + // List of project namespace paths like "whynowy/test". + repeated string projects = 8; + + // SslVerify to enable ssl verification + // +optional + optional bool sslVerify = 9; + + // Filter + // +optional + optional EventSourceFilter filter = 10; +} + +message GithubAppCreds { + // PrivateKey refers to a K8s secret containing the GitHub app private key + optional k8s.io.api.core.v1.SecretKeySelector privateKey = 1; + + // AppID refers to the GitHub App ID for the application you created + optional int64 appID = 2; + + // InstallationID refers to the Installation ID of the GitHub app you created and installed + optional int64 installationID = 3; } // GithubEventSource refers to event-source for github related events message GithubEventSource { // Id is the webhook's id // Deprecated: This is not used at all, will be removed in v1.6 + // +optional optional int64 id = 1; // Webhook refers to the configuration required to run a http server @@ -455,12 +784,15 @@ message GithubEventSource { // DeprecatedOwner refers to GitHub owner name i.e. argoproj // Deprecated: use Repositories instead. Will be unsupported in v 1.6 + // +optional optional string owner = 3; // DeprecatedRepository refers to GitHub repo name i.e. argo-events // Deprecated: use Repositories instead. Will be unsupported in v 1.6 + // +optional optional string repository = 4; + // Events refer to Github events to which the event source will subscribe repeated string events = 5; // APIToken refers to a K8s secret containing github api token @@ -500,8 +832,24 @@ message GithubEventSource { map metadata = 14; // Repositories holds the information of repositories, which uses repo owner as the key, - // and list of repo names as the value + // and list of repo names as the value. Not required if Organizations is set. repeated OwnedRepositories repositories = 15; + + // Organizations holds the names of organizations (used for organization level webhooks). Not required if Repositories is set. 
+ repeated string organizations = 16; + + // GitHubApp holds the GitHub app credentials + // +optional + optional GithubAppCreds githubApp = 17; + + // Filter + // +optional + optional EventSourceFilter filter = 18; + + // PayloadEnrichment holds flags that determine whether to enrich GitHub's original payload with + // additional information. + // +optional + optional PayloadEnrichmentFlags payloadEnrichment = 19; } // GitlabEventSource refers to event-source related to Gitlab events @@ -509,14 +857,16 @@ message GitlabEventSource { // Webhook holds configuration to run a http server optional WebhookContext webhook = 1; - // ProjectID is the id of project for which integration needs to setup + // DeprecatedProjectID is the id of project for which integration needs to setup + // Deprecated: use Projects instead. Will be unsupported in v 1.7 + // +optional optional string projectID = 2; // Events are gitlab event to listen to. // Refer https://github.com/xanzy/go-gitlab/blob/bf34eca5d13a9f4c3f501d8a97b8ac226d55e4d9/projects.go#L794. repeated string events = 3; - // AccessToken is reference to k8 secret which holds the gitlab api access information + // AccessToken references to k8 secret which holds the gitlab api access information optional k8s.io.api.core.v1.SecretKeySelector accessToken = 4; // EnableSSLVerification to enable ssl verification @@ -533,6 +883,22 @@ message GitlabEventSource { // Metadata holds the user defined metadata which will passed along the event payload. // +optional map metadata = 9; + + // List of project IDs or project namespace paths like "whynowy/test". Projects and groups cannot be empty at the same time. + // +optional + repeated string projects = 10; + + // SecretToken references to k8 secret which holds the Secret Token used by webhook config + optional k8s.io.api.core.v1.SecretKeySelector secretToken = 11; + + // Filter + // +optional + optional EventSourceFilter filter = 12; + + // List of group IDs or group name like "test". + // Group level hook available in Premium and Ultimate Gitlab. + // +optional + repeated string groups = 13; } // HDFSEventSource refers to event-source for HDFS related events @@ -578,6 +944,10 @@ message HDFSEventSource { // Metadata holds the user defined metadata which will passed along the event payload. // +optional map metadata = 12; + + // Filter + // +optional + optional EventSourceFilter filter = 13; } message KafkaConsumerGroup { @@ -599,6 +969,7 @@ message KafkaEventSource { optional string url = 1; // Partition name + // +optional optional string partition = 2; // Topic name @@ -635,6 +1006,23 @@ message KafkaEventSource { // SASL configuration for the kafka client // +optional optional github.com.argoproj.argo_events.pkg.apis.common.SASLConfig sasl = 11; + + // Filter + // +optional + optional EventSourceFilter filter = 12; + + // Yaml format Sarama config for Kafka connection. + // It follows the struct of sarama.Config. See https://github.com/IBM/sarama/blob/main/config.go + // e.g. + // + // consumer: + // fetch: + // min: 1 + // net: + // MaxOpenRequests: 5 + // + // +optional + optional string config = 13; } // MQTTEventSource refers to event-source for MQTT related events @@ -663,6 +1051,14 @@ message MQTTEventSource { // Metadata holds the user defined metadata which will passed along the event payload. 
 
 // GitlabEventSource refers to event-source related to Gitlab events
@@ -509,14 +857,16 @@ message GitlabEventSource {
   // Webhook holds configuration to run a http server
   optional WebhookContext webhook = 1;
 
-  // ProjectID is the id of project for which integration needs to setup
+  // DeprecatedProjectID is the id of the project for which the integration needs to be set up
+  // Deprecated: use Projects instead. Will be unsupported in v 1.7
+  // +optional
   optional string projectID = 2;
 
   // Events are gitlab events to listen to.
   // Refer https://github.com/xanzy/go-gitlab/blob/bf34eca5d13a9f4c3f501d8a97b8ac226d55e4d9/projects.go#L794.
   repeated string events = 3;
 
-  // AccessToken is reference to k8 secret which holds the gitlab api access information
+  // AccessToken references the k8s secret which holds the gitlab api access information
   optional k8s.io.api.core.v1.SecretKeySelector accessToken = 4;
 
   // EnableSSLVerification to enable ssl verification
@@ -533,6 +883,22 @@ message GitlabEventSource {
   // Metadata holds the user defined metadata which will be passed along the event payload.
   // +optional
   map<string, string> metadata = 9;
+
+  // List of project IDs or project namespace paths like "whynowy/test". Projects and groups cannot be empty at the same time.
+  // +optional
+  repeated string projects = 10;
+
+  // SecretToken references the k8s secret which holds the Secret Token used by webhook config
+  optional k8s.io.api.core.v1.SecretKeySelector secretToken = 11;
+
+  // Filter
+  // +optional
+  optional EventSourceFilter filter = 12;
+
+  // List of group IDs or group names like "test".
+  // Group level hook available in Premium and Ultimate Gitlab.
+  // +optional
+  repeated string groups = 13;
 }
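Projects and groups can now be mixed in a single GitLab event source (they just cannot both be empty); a hypothetical sketch:

    apiVersion: argoproj.io/v1alpha1
    kind: EventSource
    metadata:
      name: gitlab
    spec:
      gitlab:
        example:
          projects:                             # project IDs or namespace paths
            - "1001"                            # hypothetical project ID
            - "whynowy/test"
          groups:                               # group-level hooks (Premium/Ultimate GitLab)
            - "2002"                            # hypothetical group ID
          events:
            - PushEvents
          accessToken:
            name: gitlab-access                 # hypothetical secret
            key: token
          secretToken:                          # validates the webhook's Secret Token header
            name: gitlab-access
            key: secret
          webhook:
            endpoint: /push
            port: "12000"
            method: POST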
 
 // HDFSEventSource refers to event-source for HDFS related events
@@ -578,6 +944,10 @@ message HDFSEventSource {
   // Metadata holds the user defined metadata which will be passed along the event payload.
   // +optional
   map<string, string> metadata = 12;
+
+  // Filter
+  // +optional
+  optional EventSourceFilter filter = 13;
 }
 
 message KafkaConsumerGroup {
@@ -599,6 +969,7 @@ message KafkaEventSource {
   optional string url = 1;
 
   // Partition name
+  // +optional
   optional string partition = 2;
 
   // Topic name
@@ -635,6 +1006,23 @@ message KafkaEventSource {
   // SASL configuration for the kafka client
   // +optional
   optional github.com.argoproj.argo_events.pkg.apis.common.SASLConfig sasl = 11;
+
+  // Filter
+  // +optional
+  optional EventSourceFilter filter = 12;
+
+  // Yaml format Sarama config for Kafka connection.
+  // It follows the struct of sarama.Config. See https://github.com/IBM/sarama/blob/main/config.go
+  // e.g.
+  //
+  // consumer:
+  //   fetch:
+  //     min: 1
+  // net:
+  //   MaxOpenRequests: 5
+  //
+  // +optional
+  optional string config = 13;
 }
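The new config field embeds a Sarama configuration as a YAML string. In an EventSource manifest it would be passed as a block scalar, e.g. (broker address, topic and group name are hypothetical; the config keys mirror the sarama.Config example in the comment above):

    apiVersion: argoproj.io/v1alpha1
    kind: EventSource
    metadata:
      name: kafka
    spec:
      kafka:
        example:
          url: kafka.argo-events:9092           # hypothetical broker address
          topic: topic-1                        # hypothetical topic
          partition: "0"                        # now optional, per the change above
          consumerGroup:
            groupName: test-group               # hypothetical consumer group
          config: |                             # parsed into sarama.Config
            consumer:
              fetch:
                min: 1
            net:
              MaxOpenRequests: 5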
 
 // MQTTEventSource refers to event-source for MQTT related events
@@ -663,6 +1051,14 @@ message MQTTEventSource {
   // Metadata holds the user defined metadata which will be passed along the event payload.
   // +optional
   map<string, string> metadata = 7;
+
+  // Filter
+  // +optional
+  optional EventSourceFilter filter = 8;
+
+  // Auth holds secret selectors for username and password
+  // +optional
+  optional github.com.argoproj.argo_events.pkg.apis.common.BasicAuth auth = 9;
 }
 
 // NATSAuth refers to the auth info for NATS EventSource
@@ -711,6 +1107,15 @@ message NATSEventsSource {
   // Auth information
   // +optional
   optional NATSAuth auth = 7;
+
+  // Filter
+  // +optional
+  optional EventSourceFilter filter = 8;
+
+  // Queue is the name of the queue group to subscribe as if specified. Uses QueueSubscribe
+  // logic to subscribe as queue group. If the queue is empty, uses default Subscribe logic.
+  // +optional
+  optional string queue = 9;
 }
 
 // NSQEventSource describes the event source for NSQ PubSub
@@ -741,16 +1146,27 @@ message NSQEventSource {
   // Metadata holds the user defined metadata which will be passed along the event payload.
   // +optional
   map<string, string> metadata = 7;
+
+  // Filter
+  // +optional
+  optional EventSourceFilter filter = 8;
 }
 
 message OwnedRepositories {
-  // Orgnization or user name
+  // Organization or user name
   optional string owner = 1;
 
   // Repository names
   repeated string names = 2;
 }
 
+message PayloadEnrichmentFlags {
+  // FetchPROnPRCommentAdded determines whether to enrich the payload provided by GitHub
+  // on "pull request comment added" events, with the full pull request info
+  // +optional
+  optional bool fetchPROnPRCommentAdded = 1;
+}
+
 // PubSubEventSource refers to event-source for GCP PubSub related events.
 message PubSubEventSource {
   // ProjectID is GCP project ID for the subscription.
@@ -793,13 +1209,13 @@ message PubSubEventSource {
   // +optional
   optional bool jsonBody = 7;
 
-  // CredentialsFile is the file that contains credentials to authenticate for GCP
-  // Deprecated: will be removed in v1.5, use CredentialSecret instead
-  optional string credentialsFile = 8;
-
   // Metadata holds the user defined metadata which will be passed along the event payload.
   // +optional
-  map<string, string> metadata = 9;
+  map<string, string> metadata = 8;
+
+  // Filter
+  // +optional
+  optional EventSourceFilter filter = 9;
 }
 
 // PulsarEventSource describes the event source for Apache Pulsar
@@ -846,6 +1262,26 @@ message PulsarEventSource {
   // Metadata holds the user defined metadata which will be passed along the event payload.
   // +optional
   map<string, string> metadata = 10;
+
+  // Authentication token for the pulsar client.
+  // Either token or athenz can be set to use auth.
+  // +optional
+  optional k8s.io.api.core.v1.SecretKeySelector authTokenSecret = 11;
+
+  // Filter
+  // +optional
+  optional EventSourceFilter filter = 12;
+
+  // Authentication athenz parameters for the pulsar client.
+  // Refer https://github.com/apache/pulsar-client-go/blob/master/pulsar/auth/athenz.go
+  // Either token or athenz can be set to use auth.
+  // +optional
+  map<string, string> authAthenzParams = 13;
+
+  // Authentication athenz privateKey secret for the pulsar client.
+  // AuthAthenzSecret must be set if AuthAthenzParams is used.
+  // +optional
+  optional k8s.io.api.core.v1.SecretKeySelector authAthenzSecret = 14;
 }
 
 // RedisEventSource describes an event source for the Redis PubSub.
@@ -875,6 +1311,64 @@ message RedisEventSource {
   // Metadata holds the user defined metadata which will be passed along the event payload.
   // +optional
   map<string, string> metadata = 7;
+
+  // Filter
+  // +optional
+  optional EventSourceFilter filter = 8;
+
+  // JSONBody specifies that all event body payload coming from this
+  // source will be JSON
+  // +optional
+  optional bool jsonBody = 9;
+
+  // Username required for ACL style authentication if any.
+  // +optional
+  optional string username = 10;
+}
+
+// RedisStreamEventSource describes an event source for
+// Redis streams (https://redis.io/topics/streams-intro)
+message RedisStreamEventSource {
+  // HostAddress refers to the address of the Redis host/server (master instance)
+  optional string hostAddress = 1;
+
+  // Password required for authentication if any.
+  // +optional
+  optional k8s.io.api.core.v1.SecretKeySelector password = 2;
+
+  // DB to use. If not specified, default DB 0 will be used.
+  // +optional
+  optional int32 db = 3;
+
+  // Streams to look for entries. XREADGROUP is used on all streams using a single consumer group.
+  repeated string streams = 4;
+
+  // MaxMsgCountPerRead holds the maximum number of messages per stream that will be read in each XREADGROUP of all streams
+  // Example: if there are 2 streams and MaxMsgCountPerRead=10, then each XREADGROUP may read up to a total of 20 messages.
+  // Same as COUNT option in XREADGROUP (https://redis.io/topics/streams-intro). Defaults to 10
+  // +optional
+  optional int32 maxMsgCountPerRead = 5;
+
+  // ConsumerGroup refers to the Redis stream consumer group that will be
+  // created on all redis streams. Messages are read through this group. Defaults to 'argo-events-cg'
+  // +optional
+  optional string consumerGroup = 6;
+
+  // TLS configuration for the redis client.
+  // +optional
+  optional github.com.argoproj.argo_events.pkg.apis.common.TLSConfig tls = 7;
+
+  // Metadata holds the user defined metadata which will be passed along the event payload.
+  // +optional
+  map<string, string> metadata = 8;
+
+  // Filter
+  // +optional
+  optional EventSourceFilter filter = 9;
+
+  // Username required for ACL style authentication if any.
+  // +optional
+  optional string username = 10;
 }
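A RedisStreamEventSource as defined above might be declared like this (host, stream names and the secret are hypothetical):

    apiVersion: argoproj.io/v1alpha1
    kind: EventSource
    metadata:
      name: redis-stream
    spec:
      redisStream:
        example:
          hostAddress: redis.argo-events:6379   # hypothetical master instance
          streams:
            - FOO
            - BAR
          maxMsgCountPerRead: 10                # COUNT per XREADGROUP, as documented above
          consumerGroup: argo-events-cg         # the documented default group name
          password:
            name: redis-creds                   # hypothetical secret
            key: password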
 
 // ResourceEventSource refers to a event-source for K8s resource related events.
@@ -897,9 +1391,12 @@ message ResourceEventSource {
   // Metadata holds the user defined metadata which will be passed along the event payload.
   // +optional
   map<string, string> metadata = 5;
+
+  // Cluster from which events will be listened to
+  optional string cluster = 6;
 }
 
-// ResourceFilter contains K8 ObjectMeta information to further filter resource event objects
+// ResourceFilter contains K8s ObjectMeta information to further filter resource event objects
 message ResourceFilter {
   // Prefix filter is applied on the resource name.
   // +optional
@@ -907,6 +1404,10 @@ message ResourceFilter {
   // Labels provide listing options to K8s API to watch resource/s.
   // Refer https://kubernetes.io/docs/concepts/overview/working-with-objects/label-selectors/ for more info.
+  // Unlike K8s field selector, multiple values are passed as comma separated values instead of a list of values.
+  // Eg: value: value1,value2.
+  // Same as K8s label selector, the operators "=", "==", "!=", "exists", "!", "notin", "in", "gt" and "lt"
+  // are supported
   // +optional
   repeated Selector labels = 2;
 
@@ -927,6 +1428,41 @@ message ResourceFilter {
   optional bool afterStart = 5;
 }
 
+// SFTPEventSource describes an event-source for sftp related events.
+message SFTPEventSource {
+  // Type of file operations to watch
+  // Refer https://github.com/fsnotify/fsnotify/blob/master/fsnotify.go for more information
+  optional string eventType = 1;
+
+  // WatchPathConfig contains configuration about the file path to watch
+  optional WatchPathConfig watchPathConfig = 2;
+
+  // Username required for authentication if any.
+  optional k8s.io.api.core.v1.SecretKeySelector username = 3;
+
+  // Password required for authentication if any.
+  optional k8s.io.api.core.v1.SecretKeySelector password = 4;
+
+  // SSHKeySecret refers to the secret that contains the SSH key. Key needs to contain both private key and public key.
+  optional k8s.io.api.core.v1.SecretKeySelector sshKeySecret = 5;
+
+  // Address is the sftp address.
+  optional k8s.io.api.core.v1.SecretKeySelector address = 6;
+
+  // Metadata holds the user defined metadata which will be passed along the event payload.
+  // +optional
+  map<string, string> metadata = 7;
+
+  // Filter
+  // +optional
+  optional EventSourceFilter filter = 8;
+
+  // PollIntervalDuration is the interval at which to poll the SFTP server,
+  // defaults to 10 seconds
+  // +optional
+  optional string pollIntervalDuration = 9;
+}
+
 // SNSEventSource refers to event-source for AWS SNS related events
 message SNSEventSource {
   // Webhook configuration for http server
@@ -935,10 +1471,10 @@ message SNSEventSource {
   // TopicArn
   optional string topicArn = 2;
 
-  // AccessKey refers K8 secret containing aws access key
+  // AccessKey refers K8s secret containing aws access key
   optional k8s.io.api.core.v1.SecretKeySelector accessKey = 3;
 
-  // SecretKey refers K8 secret containing aws secret key
+  // SecretKey refers K8s secret containing aws secret key
   optional k8s.io.api.core.v1.SecretKeySelector secretKey = 4;
 
   // Region is AWS region
@@ -955,14 +1491,22 @@ message SNSEventSource {
   // ValidateSignature is boolean that can be set to true for SNS signature verification
   // +optional
   optional bool validateSignature = 8;
+
+  // Filter
+  // +optional
+  optional EventSourceFilter filter = 9;
+
+  // Endpoint configures connection to a specific SNS endpoint instead of Amazon's servers
+  // +optional
+  optional string endpoint = 10;
 }
 
 // SQSEventSource refers to event-source for AWS SQS related events
 message SQSEventSource {
-  // AccessKey refers K8 secret containing aws access key
+  // AccessKey refers K8s secret containing aws access key
   optional k8s.io.api.core.v1.SecretKeySelector accessKey = 1;
 
-  // SecretKey refers K8 secret containing aws secret key
+  // SecretKey refers K8s secret containing aws secret key
   optional k8s.io.api.core.v1.SecretKeySelector secretKey = 2;
 
   // Region is AWS region
@@ -991,6 +1535,24 @@ message SQSEventSource {
   // Metadata holds the user defined metadata which will be passed along the event payload.
   // +optional
   map<string, string> metadata = 9;
+
+  // DLQ specifies if a dead-letter queue is configured for messages that can't be processed successfully.
+  // If set to true, messages with invalid payload won't be acknowledged, so that they can be forwarded on to the dead-letter queue.
+  // The default value is false.
+  // +optional
+  optional bool dlq = 10;
+
+  // Filter
+  // +optional
+  optional EventSourceFilter filter = 11;
+
+  // Endpoint configures connection to a specific SQS endpoint instead of Amazon's servers
+  // +optional
+  optional string endpoint = 12;
+
+  // SessionToken refers to K8s secret containing AWS temporary credentials (STS) session token
+  // +optional
+  optional k8s.io.api.core.v1.SecretKeySelector sessionToken = 13;
 }
 
 // Selector represents conditional operation to select K8s objects.
@@ -998,7 +1560,7 @@ message Selector {
   // Key name
   optional string key = 1;
 
-  // Supported operations like ==, !=, <=, >= etc.
+  // Supported operations like ==, != etc.
   // Defaults to ==.
   // Refer https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors for more info.
   // +optional
@@ -1043,6 +1605,10 @@ message SlackEventSource {
   // Metadata holds the user defined metadata which will be passed along the event payload.
   // +optional
   map<string, string> metadata = 4;
+
+  // Filter
+  // +optional
+  optional EventSourceFilter filter = 5;
 }
 
 // StorageGridEventSource refers to event-source for StorageGrid related events
@@ -1217,10 +1783,19 @@ message WebhookContext {
   // +optional
   optional k8s.io.api.core.v1.SecretKeySelector authSecret = 8;
 
-  // DeprecatedServerCertPath refers the file that contains the cert.
-  optional string serverCertPath = 9;
+  // MaxPayloadSize is the maximum webhook payload size that the server will accept.
+  // Requests exceeding that limit will be rejected with a "request too large" response.
+  // Default value: 1048576 (1MB).
+  // +optional
+  optional int64 maxPayloadSize = 9;
+}
 
-  // DeprecatedServerKeyPath refers the file that contains private key
-  optional string serverKeyPath = 10;
+// WebhookEventSource describes an HTTP based EventSource
+message WebhookEventSource {
+  optional WebhookContext webhookContext = 1;
+
+  // Filter
+  // +optional
+  optional EventSourceFilter filter = 2;
 }
diff --git a/pkg/apis/eventsource/v1alpha1/openapi_generated.go b/pkg/apis/eventsource/v1alpha1/openapi_generated.go
index 84ec1db7f2..bed3d35ccb 100644
--- a/pkg/apis/eventsource/v1alpha1/openapi_generated.go
+++ b/pkg/apis/eventsource/v1alpha1/openapi_generated.go
@@ -1,7 +1,8 @@
+//go:build !ignore_autogenerated
 // +build !ignore_autogenerated
 
 /*
-Copyright 2020 BlackRock, Inc.
+Copyright 2021 BlackRock, Inc.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -23,55 +24,70 @@ limitations under the License.
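One note before the regenerated OpenAPI definitions below: the EventSourceFilter threaded through every spec above is just an expression evaluated against each incoming event, and the new WebhookEventSource is a WebhookContext plus that filter. A minimal sketch (the endpoint and expression are hypothetical; the expression grammar is not part of this diff):

    apiVersion: argoproj.io/v1alpha1
    kind: EventSource
    metadata:
      name: webhook
    spec:
      webhook:
        example:
          endpoint: /example
          port: "12000"
          method: POST
          filter:
            expression: "body.action == 'opened'"   # hypothetical; non-matching events are dropped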
package v1alpha1 import ( - spec "github.com/go-openapi/spec" common "k8s.io/kube-openapi/pkg/common" + spec "k8s.io/kube-openapi/pkg/validation/spec" ) func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { return map[string]common.OpenAPIDefinition{ - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AMQPConsumeConfig": schema_pkg_apis_eventsource_v1alpha1_AMQPConsumeConfig(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AMQPEventSource": schema_pkg_apis_eventsource_v1alpha1_AMQPEventSource(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AMQPExchangeDeclareConfig": schema_pkg_apis_eventsource_v1alpha1_AMQPExchangeDeclareConfig(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AMQPQueueBindConfig": schema_pkg_apis_eventsource_v1alpha1_AMQPQueueBindConfig(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AMQPQueueDeclareConfig": schema_pkg_apis_eventsource_v1alpha1_AMQPQueueDeclareConfig(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AzureEventsHubEventSource": schema_pkg_apis_eventsource_v1alpha1_AzureEventsHubEventSource(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.CalendarEventSource": schema_pkg_apis_eventsource_v1alpha1_CalendarEventSource(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.CatchupConfiguration": schema_pkg_apis_eventsource_v1alpha1_CatchupConfiguration(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.ConfigMapPersistence": schema_pkg_apis_eventsource_v1alpha1_ConfigMapPersistence(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EmitterEventSource": schema_pkg_apis_eventsource_v1alpha1_EmitterEventSource(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventPersistence": schema_pkg_apis_eventsource_v1alpha1_EventPersistence(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSource": schema_pkg_apis_eventsource_v1alpha1_EventSource(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceList": schema_pkg_apis_eventsource_v1alpha1_EventSourceList(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceSpec": schema_pkg_apis_eventsource_v1alpha1_EventSourceSpec(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceStatus": schema_pkg_apis_eventsource_v1alpha1_EventSourceStatus(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.FileEventSource": schema_pkg_apis_eventsource_v1alpha1_FileEventSource(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GenericEventSource": schema_pkg_apis_eventsource_v1alpha1_GenericEventSource(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GithubEventSource": schema_pkg_apis_eventsource_v1alpha1_GithubEventSource(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GitlabEventSource": schema_pkg_apis_eventsource_v1alpha1_GitlabEventSource(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.HDFSEventSource": schema_pkg_apis_eventsource_v1alpha1_HDFSEventSource(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.KafkaConsumerGroup": schema_pkg_apis_eventsource_v1alpha1_KafkaConsumerGroup(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.KafkaEventSource": schema_pkg_apis_eventsource_v1alpha1_KafkaEventSource(ref), - 
"github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.MQTTEventSource": schema_pkg_apis_eventsource_v1alpha1_MQTTEventSource(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.NATSAuth": schema_pkg_apis_eventsource_v1alpha1_NATSAuth(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.NATSEventsSource": schema_pkg_apis_eventsource_v1alpha1_NATSEventsSource(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.NSQEventSource": schema_pkg_apis_eventsource_v1alpha1_NSQEventSource(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.OwnedRepositories": schema_pkg_apis_eventsource_v1alpha1_OwnedRepositories(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.PubSubEventSource": schema_pkg_apis_eventsource_v1alpha1_PubSubEventSource(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.PulsarEventSource": schema_pkg_apis_eventsource_v1alpha1_PulsarEventSource(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.RedisEventSource": schema_pkg_apis_eventsource_v1alpha1_RedisEventSource(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.ResourceEventSource": schema_pkg_apis_eventsource_v1alpha1_ResourceEventSource(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.ResourceFilter": schema_pkg_apis_eventsource_v1alpha1_ResourceFilter(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SNSEventSource": schema_pkg_apis_eventsource_v1alpha1_SNSEventSource(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SQSEventSource": schema_pkg_apis_eventsource_v1alpha1_SQSEventSource(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.Selector": schema_pkg_apis_eventsource_v1alpha1_Selector(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.Service": schema_pkg_apis_eventsource_v1alpha1_Service(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SlackEventSource": schema_pkg_apis_eventsource_v1alpha1_SlackEventSource(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.StorageGridEventSource": schema_pkg_apis_eventsource_v1alpha1_StorageGridEventSource(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.StorageGridFilter": schema_pkg_apis_eventsource_v1alpha1_StorageGridFilter(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.StripeEventSource": schema_pkg_apis_eventsource_v1alpha1_StripeEventSource(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.Template": schema_pkg_apis_eventsource_v1alpha1_Template(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WatchPathConfig": schema_pkg_apis_eventsource_v1alpha1_WatchPathConfig(ref), - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WebhookContext": schema_pkg_apis_eventsource_v1alpha1_WebhookContext(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AMQPConsumeConfig": schema_pkg_apis_eventsource_v1alpha1_AMQPConsumeConfig(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AMQPEventSource": schema_pkg_apis_eventsource_v1alpha1_AMQPEventSource(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AMQPExchangeDeclareConfig": schema_pkg_apis_eventsource_v1alpha1_AMQPExchangeDeclareConfig(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AMQPQueueBindConfig": 
schema_pkg_apis_eventsource_v1alpha1_AMQPQueueBindConfig(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AMQPQueueDeclareConfig": schema_pkg_apis_eventsource_v1alpha1_AMQPQueueDeclareConfig(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AzureEventsHubEventSource": schema_pkg_apis_eventsource_v1alpha1_AzureEventsHubEventSource(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AzureQueueStorageEventSource": schema_pkg_apis_eventsource_v1alpha1_AzureQueueStorageEventSource(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AzureServiceBusEventSource": schema_pkg_apis_eventsource_v1alpha1_AzureServiceBusEventSource(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.BitbucketAuth": schema_pkg_apis_eventsource_v1alpha1_BitbucketAuth(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.BitbucketBasicAuth": schema_pkg_apis_eventsource_v1alpha1_BitbucketBasicAuth(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.BitbucketEventSource": schema_pkg_apis_eventsource_v1alpha1_BitbucketEventSource(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.BitbucketRepository": schema_pkg_apis_eventsource_v1alpha1_BitbucketRepository(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.BitbucketServerEventSource": schema_pkg_apis_eventsource_v1alpha1_BitbucketServerEventSource(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.BitbucketServerRepository": schema_pkg_apis_eventsource_v1alpha1_BitbucketServerRepository(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.CalendarEventSource": schema_pkg_apis_eventsource_v1alpha1_CalendarEventSource(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.CatchupConfiguration": schema_pkg_apis_eventsource_v1alpha1_CatchupConfiguration(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.ConfigMapPersistence": schema_pkg_apis_eventsource_v1alpha1_ConfigMapPersistence(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EmitterEventSource": schema_pkg_apis_eventsource_v1alpha1_EmitterEventSource(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventPersistence": schema_pkg_apis_eventsource_v1alpha1_EventPersistence(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSource": schema_pkg_apis_eventsource_v1alpha1_EventSource(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter": schema_pkg_apis_eventsource_v1alpha1_EventSourceFilter(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceList": schema_pkg_apis_eventsource_v1alpha1_EventSourceList(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceSpec": schema_pkg_apis_eventsource_v1alpha1_EventSourceSpec(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceStatus": schema_pkg_apis_eventsource_v1alpha1_EventSourceStatus(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.FileEventSource": schema_pkg_apis_eventsource_v1alpha1_FileEventSource(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GenericEventSource": schema_pkg_apis_eventsource_v1alpha1_GenericEventSource(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GerritEventSource": schema_pkg_apis_eventsource_v1alpha1_GerritEventSource(ref), + 
"github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GithubAppCreds": schema_pkg_apis_eventsource_v1alpha1_GithubAppCreds(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GithubEventSource": schema_pkg_apis_eventsource_v1alpha1_GithubEventSource(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GitlabEventSource": schema_pkg_apis_eventsource_v1alpha1_GitlabEventSource(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.HDFSEventSource": schema_pkg_apis_eventsource_v1alpha1_HDFSEventSource(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.KafkaConsumerGroup": schema_pkg_apis_eventsource_v1alpha1_KafkaConsumerGroup(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.KafkaEventSource": schema_pkg_apis_eventsource_v1alpha1_KafkaEventSource(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.MQTTEventSource": schema_pkg_apis_eventsource_v1alpha1_MQTTEventSource(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.NATSAuth": schema_pkg_apis_eventsource_v1alpha1_NATSAuth(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.NATSEventsSource": schema_pkg_apis_eventsource_v1alpha1_NATSEventsSource(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.NSQEventSource": schema_pkg_apis_eventsource_v1alpha1_NSQEventSource(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.OwnedRepositories": schema_pkg_apis_eventsource_v1alpha1_OwnedRepositories(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.PayloadEnrichmentFlags": schema_pkg_apis_eventsource_v1alpha1_PayloadEnrichmentFlags(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.PubSubEventSource": schema_pkg_apis_eventsource_v1alpha1_PubSubEventSource(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.PulsarEventSource": schema_pkg_apis_eventsource_v1alpha1_PulsarEventSource(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.RedisEventSource": schema_pkg_apis_eventsource_v1alpha1_RedisEventSource(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.RedisStreamEventSource": schema_pkg_apis_eventsource_v1alpha1_RedisStreamEventSource(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.ResourceEventSource": schema_pkg_apis_eventsource_v1alpha1_ResourceEventSource(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.ResourceFilter": schema_pkg_apis_eventsource_v1alpha1_ResourceFilter(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SFTPEventSource": schema_pkg_apis_eventsource_v1alpha1_SFTPEventSource(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SNSEventSource": schema_pkg_apis_eventsource_v1alpha1_SNSEventSource(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SQSEventSource": schema_pkg_apis_eventsource_v1alpha1_SQSEventSource(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.Selector": schema_pkg_apis_eventsource_v1alpha1_Selector(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.Service": schema_pkg_apis_eventsource_v1alpha1_Service(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SlackEventSource": schema_pkg_apis_eventsource_v1alpha1_SlackEventSource(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.StorageGridEventSource": 
schema_pkg_apis_eventsource_v1alpha1_StorageGridEventSource(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.StorageGridFilter": schema_pkg_apis_eventsource_v1alpha1_StorageGridFilter(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.StripeEventSource": schema_pkg_apis_eventsource_v1alpha1_StripeEventSource(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.Template": schema_pkg_apis_eventsource_v1alpha1_Template(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WatchPathConfig": schema_pkg_apis_eventsource_v1alpha1_WatchPathConfig(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WebhookContext": schema_pkg_apis_eventsource_v1alpha1_WebhookContext(ref), + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WebhookEventSource": schema_pkg_apis_eventsource_v1alpha1_WebhookEventSource(ref), } } @@ -140,6 +156,7 @@ func schema_pkg_apis_eventsource_v1alpha1_AMQPEventSource(ref common.ReferenceCa "exchangeName": { SchemaProps: spec.SchemaProps{ Description: "ExchangeName is the exchange name For more information, visit https://www.rabbitmq.com/tutorials/amqp-concepts.html", + Default: "", Type: []string{"string"}, Format: "", }, @@ -147,6 +164,7 @@ func schema_pkg_apis_eventsource_v1alpha1_AMQPEventSource(ref common.ReferenceCa "exchangeType": { SchemaProps: spec.SchemaProps{ Description: "ExchangeType is rabbitmq exchange type", + Default: "", Type: []string{"string"}, Format: "", }, @@ -154,6 +172,7 @@ func schema_pkg_apis_eventsource_v1alpha1_AMQPEventSource(ref common.ReferenceCa "routingKey": { SchemaProps: spec.SchemaProps{ Description: "Routing key for bindings", + Default: "", Type: []string{"string"}, Format: "", }, @@ -185,8 +204,9 @@ func schema_pkg_apis_eventsource_v1alpha1_AMQPEventSource(ref common.ReferenceCa Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, @@ -194,25 +214,25 @@ func schema_pkg_apis_eventsource_v1alpha1_AMQPEventSource(ref common.ReferenceCa }, "exchangeDeclare": { SchemaProps: spec.SchemaProps{ - Description: "ExchangeDeclare holds the configuration for the exchange on the server For more information, visit https://godoc.org/github.com/streadway/amqp#Channel.ExchangeDeclare", + Description: "ExchangeDeclare holds the configuration for the exchange on the server For more information, visit https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.ExchangeDeclare", Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AMQPExchangeDeclareConfig"), }, }, "queueDeclare": { SchemaProps: spec.SchemaProps{ - Description: "QueueDeclare holds the configuration of a queue to hold messages and deliver to consumers. Declaring creates a queue if it doesn't already exist, or ensures that an existing queue matches the same parameters For more information, visit https://godoc.org/github.com/streadway/amqp#Channel.QueueDeclare", + Description: "QueueDeclare holds the configuration of a queue to hold messages and deliver to consumers. 
Declaring creates a queue if it doesn't already exist, or ensures that an existing queue matches the same parameters For more information, visit https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.QueueDeclare", Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AMQPQueueDeclareConfig"), }, }, "queueBind": { SchemaProps: spec.SchemaProps{ - Description: "QueueBind holds the configuration that binds an exchange to a queue so that publishings to the exchange will be routed to the queue when the publishing routing key matches the binding routing key For more information, visit https://godoc.org/github.com/streadway/amqp#Channel.QueueBind", + Description: "QueueBind holds the configuration that binds an exchange to a queue so that publishings to the exchange will be routed to the queue when the publishing routing key matches the binding routing key For more information, visit https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.QueueBind", Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AMQPQueueBindConfig"), }, }, "consume": { SchemaProps: spec.SchemaProps{ - Description: "Consume holds the configuration to immediately starts delivering queued messages For more information, visit https://godoc.org/github.com/streadway/amqp#Channel.Consume", + Description: "Consume holds the configuration to immediately starts delivering queued messages For more information, visit https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.Consume", Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AMQPConsumeConfig"), }, }, @@ -222,12 +242,24 @@ func schema_pkg_apis_eventsource_v1alpha1_AMQPEventSource(ref common.ReferenceCa Ref: ref("github.com/argoproj/argo-events/pkg/apis/common.BasicAuth"), }, }, + "urlSecret": { + SchemaProps: spec.SchemaProps{ + Description: "URLSecret is secret reference for rabbitmq service URL", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "filter": { + SchemaProps: spec.SchemaProps{ + Description: "Filter", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter"), + }, + }, }, - Required: []string{"url", "exchangeName", "exchangeType", "routingKey"}, + Required: []string{"exchangeName", "exchangeType", "routingKey"}, }, }, Dependencies: []string{ - "github.com/argoproj/argo-events/pkg/apis/common.Backoff", "github.com/argoproj/argo-events/pkg/apis/common.BasicAuth", "github.com/argoproj/argo-events/pkg/apis/common.TLSConfig", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AMQPConsumeConfig", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AMQPExchangeDeclareConfig", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AMQPQueueBindConfig", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AMQPQueueDeclareConfig"}, + "github.com/argoproj/argo-events/pkg/apis/common.Backoff", "github.com/argoproj/argo-events/pkg/apis/common.BasicAuth", "github.com/argoproj/argo-events/pkg/apis/common.TLSConfig", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AMQPConsumeConfig", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AMQPExchangeDeclareConfig", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AMQPQueueBindConfig", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AMQPQueueDeclareConfig", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter", "k8s.io/api/core/v1.SecretKeySelector"}, } } @@ -308,92 +340,653 @@ func 
schema_pkg_apis_eventsource_v1alpha1_AMQPQueueDeclareConfig(ref common.Refe }, "durable": { SchemaProps: spec.SchemaProps{ - Description: "Durable keeps the queue also after the server restarts", - Type: []string{"boolean"}, - Format: "", + Description: "Durable keeps the queue also after the server restarts", + Type: []string{"boolean"}, + Format: "", + }, + }, + "autoDelete": { + SchemaProps: spec.SchemaProps{ + Description: "AutoDelete removes the queue when no consumers are active", + Type: []string{"boolean"}, + Format: "", + }, + }, + "exclusive": { + SchemaProps: spec.SchemaProps{ + Description: "Exclusive sets the queues to be accessible only by the connection that declares them and will be deleted wgen the connection closes", + Type: []string{"boolean"}, + Format: "", + }, + }, + "noWait": { + SchemaProps: spec.SchemaProps{ + Description: "NowWait when true, the queue assumes to be declared on the server", + Type: []string{"boolean"}, + Format: "", + }, + }, + "arguments": { + SchemaProps: spec.SchemaProps{ + Description: "Arguments of a queue (also known as \"x-arguments\") used for optional features and plugins", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_eventsource_v1alpha1_AzureEventsHubEventSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "AzureEventsHubEventSource describes the event source for azure events hub More info at https://docs.microsoft.com/en-us/azure/event-hubs/", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "fqdn": { + SchemaProps: spec.SchemaProps{ + Description: "FQDN of the EventHubs namespace you created More info at https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-get-connection-string", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "sharedAccessKeyName": { + SchemaProps: spec.SchemaProps{ + Description: "SharedAccessKeyName is the name you chose for your application's SAS keys", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "sharedAccessKey": { + SchemaProps: spec.SchemaProps{ + Description: "SharedAccessKey is the generated value of the key", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "hubName": { + SchemaProps: spec.SchemaProps{ + Description: "Event Hub path/name", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Metadata holds the user defined metadata which will passed along the event payload.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "filter": { + SchemaProps: spec.SchemaProps{ + Description: "Filter", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter"), + }, + }, + }, + Required: []string{"fqdn", "hubName"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter", "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_eventsource_v1alpha1_AzureQueueStorageEventSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "AzureQueueStorageEventSource describes the event source for azure 
queue storage more info at https://learn.microsoft.com/en-us/azure/storage/queues/", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "storageAccountName": { + SchemaProps: spec.SchemaProps{ + Description: "StorageAccountName is the name of the storage account where the queue is. This field is necessary to access via Azure AD (managed identity) and it is ignored if ConnectionString is set.", + Type: []string{"string"}, + Format: "", + }, + }, + "connectionString": { + SchemaProps: spec.SchemaProps{ + Description: "ConnectionString is the connection string to access Azure Queue Storage. If this fields is not provided it will try to access via Azure AD with StorageAccountName.", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "queueName": { + SchemaProps: spec.SchemaProps{ + Description: "QueueName is the name of the queue", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "jsonBody": { + SchemaProps: spec.SchemaProps{ + Description: "JSONBody specifies that all event body payload coming from this source will be JSON", + Type: []string{"boolean"}, + Format: "", + }, + }, + "dlq": { + SchemaProps: spec.SchemaProps{ + Description: "DLQ specifies if a dead-letter queue is configured for messages that can't be processed successfully. If set to true, messages with invalid payload won't be acknowledged to allow to forward them farther to the dead-letter queue. The default value is false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Metadata holds the user defined metadata which will passed along the event payload.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "filter": { + SchemaProps: spec.SchemaProps{ + Description: "Filter", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter"), + }, + }, + "decodeMessage": { + SchemaProps: spec.SchemaProps{ + Description: "DecodeMessage specifies if all the messages should be base64 decoded. If set to true the decoding is done before the evaluation of JSONBody", + Type: []string{"boolean"}, + Format: "", + }, + }, + "waitTimeInSeconds": { + SchemaProps: spec.SchemaProps{ + Description: "WaitTimeInSeconds is the duration (in seconds) for which the event source waits between empty results from the queue. The default value is 3 seconds.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + Required: []string{"queueName"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter", "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_eventsource_v1alpha1_AzureServiceBusEventSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "AzureServiceBusEventSource describes the event source for azure service bus More info at https://docs.microsoft.com/en-us/azure/service-bus-messaging/", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "connectionString": { + SchemaProps: spec.SchemaProps{ + Description: "ConnectionString is the connection string for the Azure Service Bus. 
If this fields is not provided it will try to access via Azure AD with DefaultAzureCredential and FullyQualifiedNamespace.", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "queueName": { + SchemaProps: spec.SchemaProps{ + Description: "QueueName is the name of the Azure Service Bus Queue", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "topicName": { + SchemaProps: spec.SchemaProps{ + Description: "TopicName is the name of the Azure Service Bus Topic", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "subscriptionName": { + SchemaProps: spec.SchemaProps{ + Description: "SubscriptionName is the name of the Azure Service Bus Topic Subscription", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "tls": { + SchemaProps: spec.SchemaProps{ + Description: "TLS configuration for the service bus client", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/common.TLSConfig"), + }, + }, + "jsonBody": { + SchemaProps: spec.SchemaProps{ + Description: "JSONBody specifies that all event body payload coming from this source will be JSON", + Type: []string{"boolean"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Metadata holds the user defined metadata which will passed along the event payload.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "filter": { + SchemaProps: spec.SchemaProps{ + Description: "Filter", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter"), + }, + }, + "fullyQualifiedNamespace": { + SchemaProps: spec.SchemaProps{ + Description: "FullyQualifiedNamespace is the Service Bus namespace name (ex: myservicebus.servicebus.windows.net). 
This field is necessary to access via Azure AD (managed identity) and it is ignored if ConnectionString is set.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"queueName", "topicName", "subscriptionName"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-events/pkg/apis/common.TLSConfig", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter", "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_eventsource_v1alpha1_BitbucketAuth(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "BitbucketAuth holds the different auth strategies for connecting to Bitbucket", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "basic": { + SchemaProps: spec.SchemaProps{ + Description: "Basic is BasicAuth auth strategy.", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.BitbucketBasicAuth"), + }, + }, + "oauthToken": { + SchemaProps: spec.SchemaProps{ + Description: "OAuthToken refers to the K8s secret that holds the OAuth Bearer token.", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.BitbucketBasicAuth", "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_eventsource_v1alpha1_BitbucketBasicAuth(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "BasicAuth holds the information required to authenticate user via basic auth mechanism", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "username": { + SchemaProps: spec.SchemaProps{ + Description: "Username refers to the K8s secret that holds the username.", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "password": { + SchemaProps: spec.SchemaProps{ + Description: "Password refers to the K8s secret that holds the password.", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + }, + Required: []string{"username", "password"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_eventsource_v1alpha1_BitbucketEventSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "BitbucketEventSource describes the event source for Bitbucket", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "deleteHookOnFinish": { + SchemaProps: spec.SchemaProps{ + Description: "DeleteHookOnFinish determines whether to delete the defined Bitbucket hook once the event source is stopped.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Metadata holds the user defined metadata which will be passed along the event payload.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "webhook": { + SchemaProps: spec.SchemaProps{ + Description: "Webhook refers to the configuration required to run an http server", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WebhookContext"), + }, + }, + "auth": { + 
SchemaProps: spec.SchemaProps{ + Description: "Auth information required to connect to Bitbucket.", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.BitbucketAuth"), + }, + }, + "events": { + SchemaProps: spec.SchemaProps{ + Description: "Events this webhook is subscribed to.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "owner": { + SchemaProps: spec.SchemaProps{ + Description: "DeprecatedOwner is the owner of the repository. Deprecated: use Repositories instead. Will be unsupported in v1.9", + Type: []string{"string"}, + Format: "", + }, + }, + "projectKey": { + SchemaProps: spec.SchemaProps{ + Description: "DeprecatedProjectKey is the key of the project to which the repository relates Deprecated: use Repositories instead. Will be unsupported in v1.9", + Type: []string{"string"}, + Format: "", + }, + }, + "repositorySlug": { + SchemaProps: spec.SchemaProps{ + Description: "DeprecatedRepositorySlug is a URL-friendly version of a repository name, automatically generated by Bitbucket for use in the URL Deprecated: use Repositories instead. Will be unsupported in v1.9", + Type: []string{"string"}, + Format: "", + }, + }, + "repositories": { + SchemaProps: spec.SchemaProps{ + Description: "Repositories holds a list of repositories for which integration needs to set up", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.BitbucketRepository"), + }, + }, + }, + }, + }, + "filter": { + SchemaProps: spec.SchemaProps{ + Description: "Filter", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter"), + }, + }, + }, + Required: []string{"webhook", "auth", "events"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.BitbucketAuth", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.BitbucketRepository", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WebhookContext"}, + } +} + +func schema_pkg_apis_eventsource_v1alpha1_BitbucketRepository(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "owner": { + SchemaProps: spec.SchemaProps{ + Description: "Owner is the owner of the repository", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "repositorySlug": { + SchemaProps: spec.SchemaProps{ + Description: "RepositorySlug is a URL-friendly version of a repository name, automatically generated by Bitbucket for use in the URL", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"owner", "repositorySlug"}, + }, + }, + } +} + +func schema_pkg_apis_eventsource_v1alpha1_BitbucketServerEventSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "BitbucketServerEventSource refers to event-source related to Bitbucket Server events", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "webhook": { + SchemaProps: 
spec.SchemaProps{ + Description: "Webhook holds configuration to run a http server.", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WebhookContext"), + }, + }, + "projectKey": { + SchemaProps: spec.SchemaProps{ + Description: "DeprecatedProjectKey is the key of project for which integration needs to set up. Deprecated: use Repositories instead. Will be unsupported in v1.8.", + Type: []string{"string"}, + Format: "", + }, + }, + "repositorySlug": { + SchemaProps: spec.SchemaProps{ + Description: "DeprecatedRepositorySlug is the slug of the repository for which integration needs to set up. Deprecated: use Repositories instead. Will be unsupported in v1.8.", + Type: []string{"string"}, + Format: "", + }, + }, + "projects": { + SchemaProps: spec.SchemaProps{ + Description: "Projects holds a list of projects for which integration needs to set up, this will add the webhook to all repositories in the project.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "repositories": { + SchemaProps: spec.SchemaProps{ + Description: "Repositories holds a list of repositories for which integration needs to set up.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.BitbucketServerRepository"), + }, + }, + }, + }, + }, + "events": { + SchemaProps: spec.SchemaProps{ + Description: "Events are bitbucket event to listen to. Refer https://confluence.atlassian.com/bitbucketserver/event-payload-938025882.html", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "skipBranchRefsChangedOnOpenPR": { + SchemaProps: spec.SchemaProps{ + Description: "SkipBranchRefsChangedOnOpenPR bypasses the event repo:refs_changed for branches whenever there's an associated open pull request. 
This helps in optimizing the event handling process by avoiding unnecessary triggers for branch reference changes that are already part of a pull request under review.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "accessToken": { + SchemaProps: spec.SchemaProps{ + Description: "AccessToken is reference to K8s secret which holds the bitbucket api access information.", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "webhookSecret": { + SchemaProps: spec.SchemaProps{ + Description: "WebhookSecret is reference to K8s secret which holds the bitbucket webhook secret (for HMAC validation).", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "bitbucketserverBaseURL": { + SchemaProps: spec.SchemaProps{ + Description: "BitbucketServerBaseURL is the base URL for API requests to a custom endpoint.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "deleteHookOnFinish": { + SchemaProps: spec.SchemaProps{ + Description: "DeleteHookOnFinish determines whether to delete the Bitbucket Server hook for the project once the event source is stopped.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Metadata holds the user defined metadata which will passed along the event payload.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, }, }, - "autoDelete": { + "filter": { SchemaProps: spec.SchemaProps{ - Description: "AutoDelete removes the queue when no consumers are active", - Type: []string{"boolean"}, - Format: "", + Description: "Filter", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter"), }, }, - "exclusive": { + "tls": { SchemaProps: spec.SchemaProps{ - Description: "Exclusive sets the queues to be accessible only by the connection that declares them and will be deleted wgen the connection closes", - Type: []string{"boolean"}, - Format: "", + Description: "TLS configuration for the bitbucketserver client.", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/common.TLSConfig"), }, }, - "noWait": { + "checkInterval": { SchemaProps: spec.SchemaProps{ - Description: "NowWait when true, the queue assumes to be declared on the server", - Type: []string{"boolean"}, + Description: "CheckInterval is a duration in which to wait before checking that the webhooks exist, e.g. 1s, 30m, 2h... 
(defaults to 1m)", + Default: "", + Type: []string{"string"}, Format: "", }, }, }, + Required: []string{"bitbucketserverBaseURL"}, }, }, + Dependencies: []string{ + "github.com/argoproj/argo-events/pkg/apis/common.TLSConfig", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.BitbucketServerRepository", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WebhookContext", "k8s.io/api/core/v1.SecretKeySelector"}, } } -func schema_pkg_apis_eventsource_v1alpha1_AzureEventsHubEventSource(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_pkg_apis_eventsource_v1alpha1_BitbucketServerRepository(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "AzureEventsHubEventSource describes the event source for azure events hub More info at https://docs.microsoft.com/en-us/azure/event-hubs/", - Type: []string{"object"}, + Type: []string{"object"}, Properties: map[string]spec.Schema{ - "fqdn": { + "projectKey": { SchemaProps: spec.SchemaProps{ - Description: "FQDN of the EventHubs namespace you created More info at https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-get-connection-string", + Description: "ProjectKey is the key of project for which integration needs to set up.", + Default: "", Type: []string{"string"}, Format: "", }, }, - "sharedAccessKeyName": { - SchemaProps: spec.SchemaProps{ - Description: "SharedAccessKeyName is the name you chose for your application's SAS keys", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "sharedAccessKey": { - SchemaProps: spec.SchemaProps{ - Description: "SharedAccessKey is the generated value of the key", - Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), - }, - }, - "hubName": { + "repositorySlug": { SchemaProps: spec.SchemaProps{ - Description: "Event Hub path/name", + Description: "RepositorySlug is the slug of the repository for which integration needs to set up.", + Default: "", Type: []string{"string"}, Format: "", }, }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Metadata holds the user defined metadata which will passed along the event payload.", - Type: []string{"object"}, - AdditionalProperties: &spec.SchemaOrBool{ - Allows: true, - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, }, - Required: []string{"fqdn", "hubName"}, + Required: []string{"projectKey", "repositorySlug"}, }, }, - Dependencies: []string{ - "k8s.io/api/core/v1.SecretKeySelector"}, } } @@ -407,6 +1000,7 @@ func schema_pkg_apis_eventsource_v1alpha1_CalendarEventSource(ref common.Referen "schedule": { SchemaProps: spec.SchemaProps{ Description: "Schedule is a cron-like expression. For reference, see: https://en.wikipedia.org/wiki/Cron", + Default: "", Type: []string{"string"}, Format: "", }, @@ -414,18 +1008,21 @@ func schema_pkg_apis_eventsource_v1alpha1_CalendarEventSource(ref common.Referen "interval": { SchemaProps: spec.SchemaProps{ Description: "Interval is a string that describes an interval duration, e.g. 
@@ -407,6 +1000,7 @@ func schema_pkg_apis_eventsource_v1alpha1_CalendarEventSource(ref common.Referen
 				"schedule": {
 					SchemaProps: spec.SchemaProps{
 						Description: "Schedule is a cron-like expression. For reference, see: https://en.wikipedia.org/wiki/Cron",
+						Default: "",
 						Type: []string{"string"},
 						Format: "",
 					},
@@ -414,18 +1008,21 @@ func schema_pkg_apis_eventsource_v1alpha1_CalendarEventSource(ref common.Referen
 				"interval": {
 					SchemaProps: spec.SchemaProps{
 						Description: "Interval is a string that describes an interval duration, e.g. 1s, 30m, 2h...",
+						Default: "",
 						Type: []string{"string"},
 						Format: "",
 					},
 				},
 				"exclusionDates": {
 					SchemaProps: spec.SchemaProps{
-						Type: []string{"array"},
+						Description: "ExclusionDates defines the list of DATE-TIME exceptions for recurring events.",
+						Type: []string{"array"},
 						Items: &spec.SchemaOrArray{
 							Schema: &spec.Schema{
 								SchemaProps: spec.SchemaProps{
-									Type: []string{"string"},
-									Format: "",
+									Default: "",
+									Type: []string{"string"},
+									Format: "",
 								},
 							},
 						},
@@ -438,13 +1035,6 @@ func schema_pkg_apis_eventsource_v1alpha1_CalendarEventSource(ref common.Referen
 						Format: "",
 					},
 				},
-				"userPayload": {
-					SchemaProps: spec.SchemaProps{
-						Description: "UserPayload will be sent to sensor as extra data once the event is triggered Deprecated: will be removed in v1.5. Please use Metadata instead.",
-						Type: []string{"string"},
-						Format: "byte",
-					},
-				},
 				"metadata": {
 					SchemaProps: spec.SchemaProps{
 						Description: "Metadata holds the user defined metadata which will passed along the event payload.",
@@ -453,8 +1043,9 @@ func schema_pkg_apis_eventsource_v1alpha1_CalendarEventSource(ref common.Referen
 							Allows: true,
 							Schema: &spec.Schema{
 								SchemaProps: spec.SchemaProps{
-									Type: []string{"string"},
-									Format: "",
+									Default: "",
+									Type: []string{"string"},
+									Format: "",
 								},
 							},
 						},
@@ -466,12 +1057,17 @@ func schema_pkg_apis_eventsource_v1alpha1_CalendarEventSource(ref common.Referen
 						Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventPersistence"),
 					},
 				},
+				"filter": {
+					SchemaProps: spec.SchemaProps{
+						Description: "Filter",
+						Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter"),
+					},
+				},
 			},
-			Required: []string{"schedule", "interval"},
 		},
 	},
 	Dependencies: []string{
-		"github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventPersistence"},
+		"github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventPersistence", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter"},
 }
 }
@@ -537,6 +1133,7 @@ func schema_pkg_apis_eventsource_v1alpha1_EmitterEventSource(ref common.Referenc
 				"broker": {
 					SchemaProps: spec.SchemaProps{
 						Description: "Broker URI to connect to.",
+						Default: "",
 						Type: []string{"string"},
 						Format: "",
 					},
@@ -544,6 +1141,7 @@ func schema_pkg_apis_eventsource_v1alpha1_EmitterEventSource(ref common.Referenc
 				"channelKey": {
 					SchemaProps: spec.SchemaProps{
 						Description: "ChannelKey refers to the channel key",
+						Default: "",
 						Type: []string{"string"},
 						Format: "",
 					},
@@ -551,6 +1149,7 @@ func schema_pkg_apis_eventsource_v1alpha1_EmitterEventSource(ref common.Referenc
 				"channelName": {
 					SchemaProps: spec.SchemaProps{
 						Description: "ChannelName refers to the channel name",
+						Default: "",
 						Type: []string{"string"},
 						Format: "",
 					},
@@ -594,19 +1193,26 @@ func schema_pkg_apis_eventsource_v1alpha1_EmitterEventSource(ref common.Referenc
 							Allows: true,
 							Schema: &spec.Schema{
 								SchemaProps: spec.SchemaProps{
-									Type: []string{"string"},
-									Format: "",
+									Default: "",
+									Type: []string{"string"},
+									Format: "",
 								},
 							},
 						},
 					},
 				},
+				"filter": {
+					SchemaProps: spec.SchemaProps{
+						Description: "Filter",
+						Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter"),
+					},
+				},
 			},
 			Required: []string{"broker", "channelKey", "channelName"},
 		},
 	},
 	Dependencies: []string{
-		"github.com/argoproj/argo-events/pkg/apis/common.Backoff", "github.com/argoproj/argo-events/pkg/apis/common.TLSConfig", "k8s.io/api/core/v1.SecretKeySelector"},
+		"github.com/argoproj/argo-events/pkg/apis/common.Backoff", "github.com/argoproj/argo-events/pkg/apis/common.TLSConfig", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter", "k8s.io/api/core/v1.SecretKeySelector"},
 }
 }
@@ -659,17 +1265,20 @@ func schema_pkg_apis_eventsource_v1alpha1_EventSource(ref common.ReferenceCallba
 				},
 				"metadata": {
 					SchemaProps: spec.SchemaProps{
-						Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
+						Default: map[string]interface{}{},
+						Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
 					},
 				},
 				"spec": {
 					SchemaProps: spec.SchemaProps{
-						Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceSpec"),
+						Default: map[string]interface{}{},
+						Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceSpec"),
 					},
 				},
 				"status": {
 					SchemaProps: spec.SchemaProps{
-						Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceStatus"),
+						Default: map[string]interface{}{},
+						Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceStatus"),
 					},
 				},
 			},
@@ -681,6 +1290,24 @@ func schema_pkg_apis_eventsource_v1alpha1_EventSource(ref common.ReferenceCallba
 	}
 }
 
+func schema_pkg_apis_eventsource_v1alpha1_EventSourceFilter(ref common.ReferenceCallback) common.OpenAPIDefinition {
+	return common.OpenAPIDefinition{
+		Schema: spec.Schema{
+			SchemaProps: spec.SchemaProps{
+				Type: []string{"object"},
+				Properties: map[string]spec.Schema{
+					"expression": {
+						SchemaProps: spec.SchemaProps{
+							Type: []string{"string"},
+							Format: "",
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.CalendarEventSource"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.CalendarEventSource"), }, }, }, @@ -797,7 +1421,8 @@ func schema_pkg_apis_eventsource_v1alpha1_EventSourceSpec(ref common.ReferenceCa Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.FileEventSource"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.FileEventSource"), }, }, }, @@ -811,7 +1436,8 @@ func schema_pkg_apis_eventsource_v1alpha1_EventSourceSpec(ref common.ReferenceCa Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.ResourceEventSource"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.ResourceEventSource"), }, }, }, @@ -825,7 +1451,8 @@ func schema_pkg_apis_eventsource_v1alpha1_EventSourceSpec(ref common.ReferenceCa Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WebhookContext"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WebhookEventSource"), }, }, }, @@ -839,7 +1466,8 @@ func schema_pkg_apis_eventsource_v1alpha1_EventSourceSpec(ref common.ReferenceCa Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AMQPEventSource"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AMQPEventSource"), }, }, }, @@ -853,7 +1481,8 @@ func schema_pkg_apis_eventsource_v1alpha1_EventSourceSpec(ref common.ReferenceCa Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.KafkaEventSource"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.KafkaEventSource"), }, }, }, @@ -867,7 +1496,8 @@ func schema_pkg_apis_eventsource_v1alpha1_EventSourceSpec(ref common.ReferenceCa Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.MQTTEventSource"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.MQTTEventSource"), }, }, }, @@ -881,7 +1511,8 @@ func schema_pkg_apis_eventsource_v1alpha1_EventSourceSpec(ref common.ReferenceCa Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.NATSEventsSource"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.NATSEventsSource"), }, }, }, @@ -895,7 +1526,8 @@ func schema_pkg_apis_eventsource_v1alpha1_EventSourceSpec(ref common.ReferenceCa Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SNSEventSource"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SNSEventSource"), }, }, }, @@ -909,7 +1541,8 @@ func schema_pkg_apis_eventsource_v1alpha1_EventSourceSpec(ref common.ReferenceCa 
Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SQSEventSource"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SQSEventSource"), }, }, }, @@ -923,7 +1556,8 @@ func schema_pkg_apis_eventsource_v1alpha1_EventSourceSpec(ref common.ReferenceCa Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.PubSubEventSource"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.PubSubEventSource"), }, }, }, @@ -937,7 +1571,8 @@ func schema_pkg_apis_eventsource_v1alpha1_EventSourceSpec(ref common.ReferenceCa Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GithubEventSource"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GithubEventSource"), }, }, }, @@ -951,7 +1586,8 @@ func schema_pkg_apis_eventsource_v1alpha1_EventSourceSpec(ref common.ReferenceCa Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GitlabEventSource"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GitlabEventSource"), }, }, }, @@ -965,7 +1601,8 @@ func schema_pkg_apis_eventsource_v1alpha1_EventSourceSpec(ref common.ReferenceCa Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.HDFSEventSource"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.HDFSEventSource"), }, }, }, @@ -979,7 +1616,8 @@ func schema_pkg_apis_eventsource_v1alpha1_EventSourceSpec(ref common.ReferenceCa Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SlackEventSource"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SlackEventSource"), }, }, }, @@ -993,7 +1631,8 @@ func schema_pkg_apis_eventsource_v1alpha1_EventSourceSpec(ref common.ReferenceCa Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.StorageGridEventSource"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.StorageGridEventSource"), }, }, }, @@ -1007,7 +1646,8 @@ func schema_pkg_apis_eventsource_v1alpha1_EventSourceSpec(ref common.ReferenceCa Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AzureEventsHubEventSource"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AzureEventsHubEventSource"), }, }, }, @@ -1021,7 +1661,8 @@ func schema_pkg_apis_eventsource_v1alpha1_EventSourceSpec(ref common.ReferenceCa Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.StripeEventSource"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.StripeEventSource"), }, 
}, }, @@ -1035,7 +1676,8 @@ func schema_pkg_apis_eventsource_v1alpha1_EventSourceSpec(ref common.ReferenceCa Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EmitterEventSource"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EmitterEventSource"), }, }, }, @@ -1049,7 +1691,8 @@ func schema_pkg_apis_eventsource_v1alpha1_EventSourceSpec(ref common.ReferenceCa Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.RedisEventSource"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.RedisEventSource"), }, }, }, @@ -1063,7 +1706,8 @@ func schema_pkg_apis_eventsource_v1alpha1_EventSourceSpec(ref common.ReferenceCa Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.NSQEventSource"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.NSQEventSource"), }, }, }, @@ -1077,7 +1721,8 @@ func schema_pkg_apis_eventsource_v1alpha1_EventSourceSpec(ref common.ReferenceCa Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.PulsarEventSource"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.PulsarEventSource"), }, }, }, @@ -1091,7 +1736,8 @@ func schema_pkg_apis_eventsource_v1alpha1_EventSourceSpec(ref common.ReferenceCa Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GenericEventSource"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GenericEventSource"), }, }, }, @@ -1104,11 +1750,116 @@ func schema_pkg_apis_eventsource_v1alpha1_EventSourceSpec(ref common.ReferenceCa Format: "int32", }, }, + "bitbucketserver": { + SchemaProps: spec.SchemaProps{ + Description: "Bitbucket Server event sources", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.BitbucketServerEventSource"), + }, + }, + }, + }, + }, + "bitbucket": { + SchemaProps: spec.SchemaProps{ + Description: "Bitbucket event sources", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.BitbucketEventSource"), + }, + }, + }, + }, + }, + "redisStream": { + SchemaProps: spec.SchemaProps{ + Description: "Redis stream source", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.RedisStreamEventSource"), + }, + }, + }, + }, + }, + "azureServiceBus": { + SchemaProps: spec.SchemaProps{ + Description: "Azure Service Bus event source", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + 
Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AzureServiceBusEventSource"), + }, + }, + }, + }, + }, + "azureQueueStorage": { + SchemaProps: spec.SchemaProps{ + Description: "AzureQueueStorage event source", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AzureQueueStorageEventSource"), + }, + }, + }, + }, + }, + "sftp": { + SchemaProps: spec.SchemaProps{ + Description: "SFTP event sources", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SFTPEventSource"), + }, + }, + }, + }, + }, + "gerrit": { + SchemaProps: spec.SchemaProps{ + Description: "Gerrit event source", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GerritEventSource"), + }, + }, + }, + }, + }, }, }, }, Dependencies: []string{ - "github.com/argoproj/argo-events/pkg/apis/common.S3Artifact", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AMQPEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AzureEventsHubEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.CalendarEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EmitterEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.FileEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GenericEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GithubEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GitlabEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.HDFSEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.KafkaEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.MQTTEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.NATSEventsSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.NSQEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.PubSubEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.PulsarEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.RedisEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.ResourceEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SNSEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SQSEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.Service", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SlackEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.StorageGridEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.StripeEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.Template", 
"github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WebhookContext"}, + "github.com/argoproj/argo-events/pkg/apis/common.S3Artifact", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AMQPEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AzureEventsHubEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AzureQueueStorageEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.AzureServiceBusEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.BitbucketEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.BitbucketServerEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.CalendarEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EmitterEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.FileEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GenericEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GerritEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GithubEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GitlabEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.HDFSEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.KafkaEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.MQTTEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.NATSEventsSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.NSQEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.PubSubEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.PulsarEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.RedisEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.RedisStreamEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.ResourceEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SFTPEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SNSEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SQSEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.Service", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.SlackEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.StorageGridEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.StripeEventSource", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.Template", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WebhookEventSource"}, } } @@ -1132,7 +1883,8 @@ func schema_pkg_apis_eventsource_v1alpha1_EventSourceStatus(ref common.Reference Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/common.Condition"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/common.Condition"), }, }, }, @@ -1156,6 +1908,7 @@ func schema_pkg_apis_eventsource_v1alpha1_FileEventSource(ref common.ReferenceCa "eventType": { SchemaProps: spec.SchemaProps{ Description: "Type of file operations to watch Refer https://github.com/fsnotify/fsnotify/blob/master/fsnotify.go for more information", + Default: "", Type: []string{"string"}, Format: "", }, @@ -1163,6 
@@ -1132,7 +1883,8 @@ func schema_pkg_apis_eventsource_v1alpha1_EventSourceStatus(ref common.Reference
 				Items: &spec.SchemaOrArray{
 					Schema: &spec.Schema{
 						SchemaProps: spec.SchemaProps{
-							Ref: ref("github.com/argoproj/argo-events/pkg/apis/common.Condition"),
+							Default: map[string]interface{}{},
+							Ref: ref("github.com/argoproj/argo-events/pkg/apis/common.Condition"),
 						},
 					},
 				},
@@ -1156,6 +1908,7 @@ func schema_pkg_apis_eventsource_v1alpha1_FileEventSource(ref common.ReferenceCa
 				"eventType": {
 					SchemaProps: spec.SchemaProps{
 						Description: "Type of file operations to watch Refer https://github.com/fsnotify/fsnotify/blob/master/fsnotify.go for more information",
+						Default: "",
 						Type: []string{"string"},
 						Format: "",
 					},
@@ -1163,6 +1916,7 @@ func schema_pkg_apis_eventsource_v1alpha1_FileEventSource(ref common.ReferenceCa
 				"watchPathConfig": {
 					SchemaProps: spec.SchemaProps{
 						Description: "WatchPathConfig contains configuration about the file path to watch",
+						Default: map[string]interface{}{},
 						Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WatchPathConfig"),
 					},
 				},
@@ -1181,19 +1935,26 @@ func schema_pkg_apis_eventsource_v1alpha1_FileEventSource(ref common.ReferenceCa
 							Allows: true,
 							Schema: &spec.Schema{
 								SchemaProps: spec.SchemaProps{
-									Type: []string{"string"},
-									Format: "",
+									Default: "",
+									Type: []string{"string"},
+									Format: "",
 								},
 							},
 						},
 					},
 				},
+				"filter": {
+					SchemaProps: spec.SchemaProps{
+						Description: "Filter",
+						Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter"),
+					},
+				},
 			},
 			Required: []string{"eventType", "watchPathConfig"},
 		},
 	},
 	Dependencies: []string{
-		"github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WatchPathConfig"},
+		"github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WatchPathConfig"},
 }
 }
@@ -1207,6 +1968,7 @@ func schema_pkg_apis_eventsource_v1alpha1_GenericEventSource(ref common.Referenc
 				"url": {
 					SchemaProps: spec.SchemaProps{
 						Description: "URL of the gRPC server that implements the event source.",
+						Default: "",
 						Type: []string{"string"},
 						Format: "",
 					},
@@ -1214,6 +1976,7 @@ func schema_pkg_apis_eventsource_v1alpha1_GenericEventSource(ref common.Referenc
 				"config": {
 					SchemaProps: spec.SchemaProps{
 						Description: "Config is the event source configuration",
+						Default: "",
 						Type: []string{"string"},
 						Format: "",
 					},
@@ -1227,7 +1990,101 @@ func schema_pkg_apis_eventsource_v1alpha1_GenericEventSourc
 				"jsonBody": {
 					SchemaProps: spec.SchemaProps{
-						Description: "JSONBody specifies that all event body payload coming from this source will be JSON",
+						Description: "JSONBody specifies that all event body payload coming from this source will be JSON",
+						Type: []string{"boolean"},
+						Format: "",
+					},
+				},
+				"metadata": {
+					SchemaProps: spec.SchemaProps{
+						Description: "Metadata holds the user defined metadata which will be passed along the event payload.",
+						Type: []string{"object"},
+						AdditionalProperties: &spec.SchemaOrBool{
+							Allows: true,
+							Schema: &spec.Schema{
+								SchemaProps: spec.SchemaProps{
+									Default: "",
+									Type: []string{"string"},
+									Format: "",
+								},
+							},
+						},
+					},
+				},
+				"authSecret": {
+					SchemaProps: spec.SchemaProps{
+						Description: "AuthSecret holds a secret selector that contains a bearer token for authentication",
+						Ref: ref("k8s.io/api/core/v1.SecretKeySelector"),
+					},
+				},
+				"filter": {
+					SchemaProps: spec.SchemaProps{
+						Description: "Filter",
+						Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter"),
+					},
+				},
+			},
+			Required: []string{"url", "config"},
+		},
+	},
+	Dependencies: []string{
+		"github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter", "k8s.io/api/core/v1.SecretKeySelector"},
+	}
+}
+
+func schema_pkg_apis_eventsource_v1alpha1_GerritEventSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
+	return common.OpenAPIDefinition{
+		Schema: spec.Schema{
+			SchemaProps: spec.SchemaProps{
+				Description: "GerritEventSource refers to event-source related to gerrit events",
+				Type: []string{"object"},
+				Properties: map[string]spec.Schema{
+					"webhook": {
+						SchemaProps: spec.SchemaProps{
+							Description: "Webhook holds configuration to run a http server",
+							Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WebhookContext"),
+						},
+					},
+					"hookName": {
+						SchemaProps: spec.SchemaProps{
+							Description: "HookName is the name of the webhook",
+							Default: "",
+							Type: []string{"string"},
+							Format: "",
+						},
+					},
+					"events": {
+						SchemaProps: spec.SchemaProps{
+							Description: "Events are Gerrit events to listen to. Refer https://gerrit-review.googlesource.com/Documentation/cmd-stream-events.html#events",
+							Type: []string{"array"},
+							Items: &spec.SchemaOrArray{
+								Schema: &spec.Schema{
+									SchemaProps: spec.SchemaProps{
+										Default: "",
+										Type: []string{"string"},
+										Format: "",
+									},
+								},
+							},
+						},
+					},
+					"auth": {
+						SchemaProps: spec.SchemaProps{
+							Description: "Auth holds secret selectors for username and password",
+							Ref: ref("github.com/argoproj/argo-events/pkg/apis/common.BasicAuth"),
+						},
+					},
+					"gerritBaseURL": {
+						SchemaProps: spec.SchemaProps{
+							Description: "GerritBaseURL is the base URL for API requests to a custom endpoint",
+							Default: "",
+							Type: []string{"string"},
+							Format: "",
+						},
+					},
+					"deleteHookOnFinish": {
+						SchemaProps: spec.SchemaProps{
+							Description: "DeleteHookOnFinish determines whether to delete the Gerrit hook for the project once the event source is stopped.",
 						Type: []string{"boolean"},
 						Format: "",
 					},
@@ -1240,21 +2097,81 @@ func schema_pkg_apis_eventsource_v1alpha1_GenericEventSourc
 							Allows: true,
 							Schema: &spec.Schema{
 								SchemaProps: spec.SchemaProps{
-									Type: []string{"string"},
-									Format: "",
+									Default: "",
+									Type: []string{"string"},
+									Format: "",
 								},
 							},
 						},
 					},
 				},
-				"authSecret": {
+				"projects": {
 					SchemaProps: spec.SchemaProps{
-						Description: "AuthSecret holds a secret selector that contains a bearer token for authentication",
+						Description: "List of project namespace paths like \"whynowy/test\".",
+						Type: []string{"array"},
+						Items: &spec.SchemaOrArray{
+							Schema: &spec.Schema{
+								SchemaProps: spec.SchemaProps{
+									Default: "",
+									Type: []string{"string"},
+									Format: "",
+								},
+							},
+						},
+					},
+				},
+				"sslVerify": {
+					SchemaProps: spec.SchemaProps{
+						Description: "SslVerify to enable ssl verification",
+						Type: []string{"boolean"},
+						Format: "",
+					},
+				},
+				"filter": {
+					SchemaProps: spec.SchemaProps{
+						Description: "Filter",
+						Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter"),
+					},
+				},
+			},
+			Required: []string{"hookName", "events", "gerritBaseURL"},
+		},
+	},
+	Dependencies: []string{
+		"github.com/argoproj/argo-events/pkg/apis/common.BasicAuth", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WebhookContext"},
+	}
+}
+
+func schema_pkg_apis_eventsource_v1alpha1_GithubAppCreds(ref common.ReferenceCallback) common.OpenAPIDefinition {
+	return common.OpenAPIDefinition{
+		Schema: spec.Schema{
+			SchemaProps: spec.SchemaProps{
+				Type: []string{"object"},
+				Properties: map[string]spec.Schema{
+					"privateKey": {
+						SchemaProps: spec.SchemaProps{
+							Description: "PrivateKey refers to a K8s secret containing the GitHub app private key",
 						Ref: ref("k8s.io/api/core/v1.SecretKeySelector"),
 					},
 				},
+					"appID": {
+						SchemaProps: spec.SchemaProps{
+							Description: "AppID refers to the GitHub App ID for the application you created",
+							Default: 0,
+							Type: []string{"integer"},
+							Format: "int64",
+						},
+					},
+					"installationID": {
+						SchemaProps: spec.SchemaProps{
+							Description: "InstallationID refers to the Installation ID of the GitHub app you created and installed",
+							Default: 0,
+							Type: []string{"integer"},
+							Format: "int64",
+						},
+					},
 			},
-			Required: []string{"url", "config"},
+			Required: []string{"privateKey", "appID", "installationID"},
 		},
 	},
 	Dependencies: []string{
@@ -1272,6 +2189,7 @@ func schema_pkg_apis_eventsource_v1alpha1_GithubEventSource(ref common.Reference
 				"id": {
 					SchemaProps: spec.SchemaProps{
 						Description: "Id is the webhook's id Deprecated: This is not used at all, will be removed in v1.6",
+						Default: 0,
 						Type: []string{"integer"},
 						Format: "int64",
 					},
@@ -1285,6 +2203,7 @@ func schema_pkg_apis_eventsource_v1alpha1_GithubEventSource(ref common.Reference
 				"owner": {
 					SchemaProps: spec.SchemaProps{
 						Description: "DeprecatedOwner refers to GitHub owner name i.e. argoproj Deprecated: use Repositories instead. Will be unsupported in v 1.6",
+						Default: "",
 						Type: []string{"string"},
 						Format: "",
 					},
@@ -1292,18 +2211,21 @@ func schema_pkg_apis_eventsource_v1alpha1_GithubEventSource(ref common.Reference
 				"repository": {
 					SchemaProps: spec.SchemaProps{
 						Description: "DeprecatedRepository refers to GitHub repo name i.e. argo-events Deprecated: use Repositories instead. Will be unsupported in v 1.6",
+						Default: "",
 						Type: []string{"string"},
 						Format: "",
 					},
 				},
 				"events": {
 					SchemaProps: spec.SchemaProps{
-						Type: []string{"array"},
+						Description: "Events refer to Github events to which the event source will subscribe",
+						Type: []string{"array"},
 						Items: &spec.SchemaOrArray{
 							Schema: &spec.Schema{
 								SchemaProps: spec.SchemaProps{
-									Type: []string{"string"},
-									Format: "",
+									Default: "",
+									Type: []string{"string"},
+									Format: "",
 								},
 							},
 						},
@@ -1371,8 +2293,9 @@ func schema_pkg_apis_eventsource_v1alpha1_GithubEventSource(ref common.Reference
 							Allows: true,
 							Schema: &spec.Schema{
 								SchemaProps: spec.SchemaProps{
-									Type: []string{"string"},
-									Format: "",
+									Default: "",
+									Type: []string{"string"},
+									Format: "",
 								},
 							},
 						},
@@ -1380,23 +2303,58 @@ func schema_pkg_apis_eventsource_v1alpha1_GithubEventSource(ref common.Reference
 				},
 				"repositories": {
 					SchemaProps: spec.SchemaProps{
-						Description: "Repositories holds the information of repositories, which uses repo owner as the key, and list of repo names as the value",
+						Description: "Repositories holds the information of repositories, which uses repo owner as the key, and list of repo names as the value. Not required if Organizations is set.",
 						Type: []string{"array"},
 						Items: &spec.SchemaOrArray{
 							Schema: &spec.Schema{
 								SchemaProps: spec.SchemaProps{
-									Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.OwnedRepositories"),
+									Default: map[string]interface{}{},
+									Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.OwnedRepositories"),
 								},
 							},
 						},
 					},
 				},
+				"organizations": {
+					SchemaProps: spec.SchemaProps{
+						Description: "Organizations holds the names of organizations (used for organization level webhooks). Not required if Repositories is set.",
+						Type: []string{"array"},
+						Items: &spec.SchemaOrArray{
+							Schema: &spec.Schema{
+								SchemaProps: spec.SchemaProps{
+									Default: "",
+									Type: []string{"string"},
+									Format: "",
+								},
+							},
+						},
+					},
+				},
+				"githubApp": {
+					SchemaProps: spec.SchemaProps{
+						Description: "GitHubApp holds the GitHub app credentials",
+						Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GithubAppCreds"),
+					},
+				},
+				"filter": {
+					SchemaProps: spec.SchemaProps{
+						Description: "Filter",
+						Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter"),
+					},
+				},
+				"payloadEnrichment": {
+					SchemaProps: spec.SchemaProps{
+						Description: "PayloadEnrichment holds flags that determine whether to enrich GitHub's original payload with additional information.",
+						Default: map[string]interface{}{},
+						Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.PayloadEnrichmentFlags"),
+					},
+				},
 			},
-			Required: []string{"id", "owner", "repository", "events"},
+			Required: []string{"events"},
 		},
 	},
 	Dependencies: []string{
-		"github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.OwnedRepositories", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WebhookContext", "k8s.io/api/core/v1.SecretKeySelector"},
+		"github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.GithubAppCreds", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.OwnedRepositories", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.PayloadEnrichmentFlags", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WebhookContext", "k8s.io/api/core/v1.SecretKeySelector"},
 }
 }
@@ -1415,7 +2373,7 @@ func schema_pkg_apis_eventsource_v1alpha1_GitlabEventSource(ref common.Reference
 				},
 				"projectID": {
 					SchemaProps: spec.SchemaProps{
-						Description: "ProjectID is the id of project for which integration needs to setup",
+						Description: "DeprecatedProjectID is the id of the project for which the integration needs to be set up Deprecated: use Projects instead. Will be unsupported in v 1.7",
 						Type: []string{"string"},
 						Format: "",
 					},
@@ -1427,8 +2385,9 @@ func schema_pkg_apis_eventsource_v1alpha1_GitlabEventSource(ref common.Reference
 					Items: &spec.SchemaOrArray{
 						Schema: &spec.Schema{
 							SchemaProps: spec.SchemaProps{
-								Type: []string{"string"},
-								Format: "",
+								Default: "",
+								Type: []string{"string"},
+								Format: "",
 							},
 						},
 					},
@@ -1436,7 +2395,7 @@ func schema_pkg_apis_eventsource_v1alpha1_GitlabEventSource(ref common.Reference
 				},
 				"accessToken": {
 					SchemaProps: spec.SchemaProps{
-						Description: "AccessToken is reference to k8 secret which holds the gitlab api access information",
+						Description: "AccessToken references the K8s secret which holds the GitLab API access information",
 						Ref: ref("k8s.io/api/core/v1.SecretKeySelector"),
 					},
 				},
@@ -1450,6 +2409,7 @@ func schema_pkg_apis_eventsource_v1alpha1_GitlabEventSource(ref common.Reference
 				"gitlabBaseURL": {
 					SchemaProps: spec.SchemaProps{
 						Description: "GitlabBaseURL is the base URL for API requests to a custom endpoint",
+						Default: "",
 						Type: []string{"string"},
 						Format: "",
 					},
@@ -1469,19 +2429,62 @@ func schema_pkg_apis_eventsource_v1alpha1_GitlabEventSource(ref common.Reference
 							Allows: true,
 							Schema: &spec.Schema{
 								SchemaProps: spec.SchemaProps{
-									Type: []string{"string"},
-									Format: "",
+									Default: "",
+									Type: []string{"string"},
+									Format: "",
+								},
+							},
+						},
+					},
+				},
+				"projects": {
+					SchemaProps: spec.SchemaProps{
+						Description: "List of project IDs or project namespace paths like \"whynowy/test\". Projects and groups cannot be empty at the same time.",
+						Type: []string{"array"},
+						Items: &spec.SchemaOrArray{
+							Schema: &spec.Schema{
+								SchemaProps: spec.SchemaProps{
+									Default: "",
+									Type: []string{"string"},
+									Format: "",
+								},
+							},
+						},
+					},
+				},
+				"secretToken": {
+					SchemaProps: spec.SchemaProps{
+						Description: "SecretToken references the K8s secret which holds the Secret Token used by the webhook config",
+						Ref: ref("k8s.io/api/core/v1.SecretKeySelector"),
+					},
+				},
+				"filter": {
+					SchemaProps: spec.SchemaProps{
+						Description: "Filter",
+						Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter"),
+					},
+				},
+				"groups": {
+					SchemaProps: spec.SchemaProps{
+						Description: "List of group IDs or group name like \"test\". Group level hook available in Premium and Ultimate Gitlab.",
+						Type: []string{"array"},
+						Items: &spec.SchemaOrArray{
+							Schema: &spec.Schema{
+								SchemaProps: spec.SchemaProps{
+									Default: "",
+									Type: []string{"string"},
+									Format: "",
 								},
 							},
 						},
 					},
 				},
 			},
-			Required: []string{"projectID", "events", "gitlabBaseURL"},
+			Required: []string{"events", "gitlabBaseURL"},
 		},
 	},
 	Dependencies: []string{
-		"github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WebhookContext", "k8s.io/api/core/v1.SecretKeySelector"},
+		"github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WebhookContext", "k8s.io/api/core/v1.SecretKeySelector"},
 }
 }
@@ -1495,6 +2498,7 @@ func schema_pkg_apis_eventsource_v1alpha1_HDFSEventSource(ref common.ReferenceCa
 				"directory": {
 					SchemaProps: spec.SchemaProps{
 						Description: "Directory to watch for events",
+						Default: "",
 						Type: []string{"string"},
 						Format: "",
 					},
@@ -1516,6 +2520,7 @@ func schema_pkg_apis_eventsource_v1alpha1_HDFSEventSource(ref common.ReferenceCa
 				"type": {
 					SchemaProps: spec.SchemaProps{
 						Description: "Type of file operations to watch",
+						Default: "",
 						Type: []string{"string"},
 						Format: "",
 					},
@@ -1533,8 +2538,9 @@ func schema_pkg_apis_eventsource_v1alpha1_HDFSEventSource(ref common.ReferenceCa
 					Items: &spec.SchemaOrArray{
 						Schema: &spec.Schema{
 							SchemaProps: spec.SchemaProps{
-								Type: []string{"string"},
-								Format: "",
+								Default: "",
+								Type: []string{"string"},
+								Format: "",
 							},
 						},
 					},
@@ -1594,19 +2600,26 @@ func schema_pkg_apis_eventsource_v1alpha1_HDFSEventSource(ref common.ReferenceCa
 							Allows: true,
 							Schema: &spec.Schema{
 								SchemaProps: spec.SchemaProps{
-									Type: []string{"string"},
-									Format: "",
+									Default: "",
+									Type: []string{"string"},
+									Format: "",
 								},
 							},
 						},
 					},
 				},
+				"filter": {
+					SchemaProps: spec.SchemaProps{
+						Description: "Filter",
+						Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter"),
+					},
+				},
 			},
 			Required: []string{"directory", "type", "addresses"},
 		},
 	},
 	Dependencies: []string{
-		"k8s.io/api/core/v1.ConfigMapKeySelector", "k8s.io/api/core/v1.SecretKeySelector"},
+		"github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter", "k8s.io/api/core/v1.ConfigMapKeySelector", "k8s.io/api/core/v1.SecretKeySelector"},
 }
 }
@@ -1619,6 +2632,7 @@ func schema_pkg_apis_eventsource_v1alpha1_KafkaConsumerGroup(ref common.Referenc
 				"groupName": {
 					SchemaProps: spec.SchemaProps{
 						Description: "The name for the consumer group to use",
+						Default: "",
 						Type: []string{"string"},
 						Format: "",
 					},
@@ -1633,6 +2647,7 @@ func schema_pkg_apis_eventsource_v1alpha1_KafkaConsumerGroup(ref common.Referenc
 				"rebalanceStrategy": {
 					SchemaProps: spec.SchemaProps{
 						Description: "Rebalance strategy can be one of: sticky, roundrobin, range. Range is the default.",
+						Default: "",
 						Type: []string{"string"},
 						Format: "",
 					},
@@ -1654,6 +2669,7 @@ func schema_pkg_apis_eventsource_v1alpha1_KafkaEventSource(ref common.ReferenceC
 				"url": {
 					SchemaProps: spec.SchemaProps{
 						Description: "URL to kafka cluster, multiple URLs separated by comma",
+						Default: "",
 						Type: []string{"string"},
 						Format: "",
 					},
@@ -1661,6 +2677,7 @@ func schema_pkg_apis_eventsource_v1alpha1_KafkaEventSource(ref common.ReferenceC
 				"partition": {
 					SchemaProps: spec.SchemaProps{
 						Description: "Partition name",
+						Default: "",
 						Type: []string{"string"},
 						Format: "",
 					},
@@ -1668,6 +2685,7 @@ func schema_pkg_apis_eventsource_v1alpha1_KafkaEventSource(ref common.ReferenceC
 				"topic": {
 					SchemaProps: spec.SchemaProps{
 						Description: "Topic name",
+						Default: "",
 						Type: []string{"string"},
 						Format: "",
 					},
@@ -1699,8 +2717,9 @@ func schema_pkg_apis_eventsource_v1alpha1_KafkaEventSource(ref common.ReferenceC
 							Allows: true,
 							Schema: &spec.Schema{
 								SchemaProps: spec.SchemaProps{
-									Type: []string{"string"},
-									Format: "",
+									Default: "",
+									Type: []string{"string"},
+									Format: "",
 								},
 							},
 						},
@@ -1722,6 +2741,7 @@ func schema_pkg_apis_eventsource_v1alpha1_KafkaEventSource(ref common.ReferenceC
 				"version": {
 					SchemaProps: spec.SchemaProps{
 						Description: "Specify what kafka version is being connected to enables certain features in sarama, defaults to 1.0.0",
+						Default: "",
 						Type: []string{"string"},
 						Format: "",
 					},
@@ -1732,12 +2752,25 @@ func schema_pkg_apis_eventsource_v1alpha1_KafkaEventSource(ref common.ReferenceC
 					Ref: ref("github.com/argoproj/argo-events/pkg/apis/common.SASLConfig"),
 				},
 			},
+			"filter": {
+				SchemaProps: spec.SchemaProps{
+					Description: "Filter",
+					Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter"),
+				},
+			},
+			"config": {
+				SchemaProps: spec.SchemaProps{
+					Description: "Yaml format Sarama config for Kafka connection. It follows the struct of sarama.Config. See https://github.com/IBM/sarama/blob/main/config.go e.g.\n\nconsumer:\n fetch:\n min: 1\nnet:\n MaxOpenRequests: 5",
+					Type: []string{"string"},
+					Format: "",
+				},
+			},
 		},
-		Required: []string{"url", "partition", "topic"},
+		Required: []string{"url", "topic"},
 	},
 },
 Dependencies: []string{
-	"github.com/argoproj/argo-events/pkg/apis/common.Backoff", "github.com/argoproj/argo-events/pkg/apis/common.SASLConfig", "github.com/argoproj/argo-events/pkg/apis/common.TLSConfig", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.KafkaConsumerGroup"},
+	"github.com/argoproj/argo-events/pkg/apis/common.Backoff", "github.com/argoproj/argo-events/pkg/apis/common.SASLConfig", "github.com/argoproj/argo-events/pkg/apis/common.TLSConfig", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.KafkaConsumerGroup"},
 }
 }
@@ -1751,6 +2784,7 @@ func schema_pkg_apis_eventsource_v1alpha1_MQTTEventSource(ref common.ReferenceCa
 				"url": {
 					SchemaProps: spec.SchemaProps{
 						Description: "URL to connect to broker",
+						Default: "",
 						Type: []string{"string"},
 						Format: "",
 					},
@@ -1758,6 +2792,7 @@ func schema_pkg_apis_eventsource_v1alpha1_MQTTEventSource(ref common.ReferenceCa
 				"topic": {
 					SchemaProps: spec.SchemaProps{
 						Description: "Topic name",
+						Default: "",
 						Type: []string{"string"},
 						Format: "",
 					},
@@ -1765,6 +2800,7 @@ func schema_pkg_apis_eventsource_v1alpha1_MQTTEventSource(ref common.ReferenceCa
 				"clientId": {
 					SchemaProps: spec.SchemaProps{
 						Description: "ClientID is the id of the client",
+						Default: "",
 						Type: []string{"string"},
 						Format: "",
 					},
@@ -1796,19 +2832,32 @@ func schema_pkg_apis_eventsource_v1alpha1_MQTTEventSource(ref common.ReferenceCa
 							Allows: true,
 							Schema: &spec.Schema{
 								SchemaProps: spec.SchemaProps{
-									Type: []string{"string"},
-									Format: "",
+									Default: "",
+									Type: []string{"string"},
+									Format: "",
 								},
 							},
 						},
 					},
 				},
+				"filter": {
+					SchemaProps: spec.SchemaProps{
+						Description: "Filter",
+						Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter"),
+					},
+				},
+				"auth": {
+					SchemaProps: spec.SchemaProps{
+						Description: "Auth holds secret selectors for username and password",
+						Ref: ref("github.com/argoproj/argo-events/pkg/apis/common.BasicAuth"),
+					},
+				},
 			},
 			Required: []string{"url", "topic", "clientId"},
 		},
 	},
 	Dependencies: []string{
-		"github.com/argoproj/argo-events/pkg/apis/common.Backoff", "github.com/argoproj/argo-events/pkg/apis/common.TLSConfig"},
+		"github.com/argoproj/argo-events/pkg/apis/common.Backoff", "github.com/argoproj/argo-events/pkg/apis/common.BasicAuth", "github.com/argoproj/argo-events/pkg/apis/common.TLSConfig", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter"},
 }
 }
@@ -1861,6 +2910,7 @@ func schema_pkg_apis_eventsource_v1alpha1_NATSEventsSource(ref common.ReferenceC
 				"url": {
 					SchemaProps: spec.SchemaProps{
 						Description: "URL to connect to NATS cluster",
+						Default: "",
 						Type: []string{"string"},
 						Format: "",
 					},
@@ -1868,6 +2918,7 @@ func schema_pkg_apis_eventsource_v1alpha1_NATSEventsSource(ref common.ReferenceC
 				"subject": {
 					SchemaProps: spec.SchemaProps{
 						Description: "Subject holds the name of the subject onto which messages are published",
+						Default: "",
 						Type: []string{"string"},
 						Format: "",
 					},
@@ -1899,8 +2950,9 @@ func schema_pkg_apis_eventsource_v1alpha1_NATSEventsSource(ref common.ReferenceC
 							Allows: true,
 							Schema: &spec.Schema{
 								SchemaProps: spec.SchemaProps{
-									Type: []string{"string"},
-									Format: "",
+									Default: "",
+									Type: []string{"string"},
+									Format: "",
 								},
 							},
 						},
@@ -1912,12 +2964,25 @@ func schema_pkg_apis_eventsource_v1alpha1_NATSEventsSource(ref common.ReferenceC
 					Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.NATSAuth"),
 				},
 			},
+			"filter": {
+				SchemaProps: spec.SchemaProps{
+					Description: "Filter",
+					Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter"),
+				},
+			},
+			"queue": {
+				SchemaProps: spec.SchemaProps{
+					Description: "Queue is the name of the queue group to subscribe as if specified. Uses QueueSubscribe logic to subscribe as queue group. If the queue is empty, uses default Subscribe logic.",
+					Type: []string{"string"},
+					Format: "",
+				},
+			},
 		},
 		Required: []string{"url", "subject"},
 	},
 },
 Dependencies: []string{
-	"github.com/argoproj/argo-events/pkg/apis/common.Backoff", "github.com/argoproj/argo-events/pkg/apis/common.TLSConfig", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.NATSAuth"},
+	"github.com/argoproj/argo-events/pkg/apis/common.Backoff", "github.com/argoproj/argo-events/pkg/apis/common.TLSConfig", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.NATSAuth"},
 }
 }
@@ -1931,6 +2996,7 @@ func schema_pkg_apis_eventsource_v1alpha1_NSQEventSource(ref common.ReferenceCal
 				"hostAddress": {
 					SchemaProps: spec.SchemaProps{
 						Description: "HostAddress is the address of the host for NSQ lookup",
+						Default: "",
 						Type: []string{"string"},
 						Format: "",
 					},
@@ -1938,6 +3004,7 @@ func schema_pkg_apis_eventsource_v1alpha1_NSQEventSource(ref common.ReferenceCal
 				"topic": {
 					SchemaProps: spec.SchemaProps{
 						Description: "Topic to subscribe to.",
+						Default: "",
 						Type: []string{"string"},
 						Format: "",
 					},
@@ -1945,6 +3012,7 @@ func schema_pkg_apis_eventsource_v1alpha1_NSQEventSource(ref common.ReferenceCal
 				"channel": {
 					SchemaProps: spec.SchemaProps{
 						Description: "Channel used for subscription",
+						Default: "",
 						Type: []string{"string"},
 						Format: "",
 					},
@@ -1976,19 +3044,26 @@ func schema_pkg_apis_eventsource_v1alpha1_NSQEventSource(ref common.ReferenceCal
 							Allows: true,
 							Schema: &spec.Schema{
 								SchemaProps: spec.SchemaProps{
-									Type: []string{"string"},
-									Format: "",
+									Default: "",
+									Type: []string{"string"},
+									Format: "",
 								},
 							},
 						},
 					},
 				},
+				"filter": {
+					SchemaProps: spec.SchemaProps{
+						Description: "Filter",
+						Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter"),
+					},
+				},
 			},
 			Required: []string{"hostAddress", "topic", "channel"},
 		},
 	},
 	Dependencies: []string{
-		"github.com/argoproj/argo-events/pkg/apis/common.Backoff", "github.com/argoproj/argo-events/pkg/apis/common.TLSConfig"},
+		"github.com/argoproj/argo-events/pkg/apis/common.Backoff", "github.com/argoproj/argo-events/pkg/apis/common.TLSConfig", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter"},
 }
 }
@@ -2000,7 +3075,7 @@ func schema_pkg_apis_eventsource_v1alpha1_OwnedRepositories(ref common.Reference
 			Properties: map[string]spec.Schema{
 				"owner": {
 					SchemaProps: spec.SchemaProps{
-						Description: "Orgnization or user name",
+						Description: "Organization or user name",
 						Type: []string{"string"},
 						Format: "",
 					},
@@ -2012,8 +3087,9 @@ func schema_pkg_apis_eventsource_v1alpha1_OwnedRepositories(ref common.Reference
 					Items: &spec.SchemaOrArray{
 						Schema: &spec.Schema{
 							SchemaProps: spec.SchemaProps{
-								Type: []string{"string"},
-								Format: "",
+								Default: "",
+								Type: []string{"string"},
+								Format: "",
 							},
 						},
 					},
@@ -2025,6 +3101,25 @@ func schema_pkg_apis_eventsource_v1alpha1_OwnedRepositories(ref common.Reference
 	}
 }
 
+func schema_pkg_apis_eventsource_v1alpha1_PayloadEnrichmentFlags(ref common.ReferenceCallback) common.OpenAPIDefinition {
+	return common.OpenAPIDefinition{
+		Schema: spec.Schema{
+			SchemaProps: spec.SchemaProps{
+				Type: []string{"object"},
+				Properties: map[string]spec.Schema{
+					"fetchPROnPRCommentAdded": {
+						SchemaProps: spec.SchemaProps{
+							Description: "FetchPROnPRCommentAdded determines whether to enrich the payload provided by GitHub on \"pull request comment added\" events, with the full pull request info",
+							Type: []string{"boolean"},
+							Format: "",
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
 func schema_pkg_apis_eventsource_v1alpha1_PubSubEventSource(ref common.ReferenceCallback) common.OpenAPIDefinition {
 	return common.OpenAPIDefinition{
 		Schema: spec.Schema{
@@ -2035,6 +3130,7 @@ func schema_pkg_apis_eventsource_v1alpha1_PubSubEventSource(ref common.Reference
 				"projectID": {
 					SchemaProps: spec.SchemaProps{
 						Description: "ProjectID is GCP project ID for the subscription. Required if you run Argo Events outside of GKE/GCE. (otherwise, the default value is its project)",
+						Default: "",
 						Type: []string{"string"},
 						Format: "",
 					},
@@ -2042,6 +3138,7 @@ func schema_pkg_apis_eventsource_v1alpha1_PubSubEventSource(ref common.Reference
 				"topicProjectID": {
 					SchemaProps: spec.SchemaProps{
 						Description: "TopicProjectID is GCP project ID for the topic. By default, it is same as ProjectID.",
+						Default: "",
 						Type: []string{"string"},
 						Format: "",
 					},
@@ -2049,6 +3146,7 @@ func schema_pkg_apis_eventsource_v1alpha1_PubSubEventSource(ref common.Reference
 				"topic": {
 					SchemaProps: spec.SchemaProps{
 						Description: "Topic to which the subscription should belongs. Required if you want the eventsource to create a new subscription. If you specify this field along with an existing subscription, it will be verified whether it actually belongs to the specified topic.",
+						Default: "",
 						Type: []string{"string"},
 						Format: "",
 					},
@@ -2056,6 +3154,7 @@ func schema_pkg_apis_eventsource_v1alpha1_PubSubEventSource(ref common.Reference
 				"subscriptionID": {
 					SchemaProps: spec.SchemaProps{
 						Description: "SubscriptionID is ID of subscription. Required if you use existing subscription. The default value will be auto generated hash based on this eventsource setting, so the subscription might be recreated every time you update the setting, which has a possibility of event loss.",
+						Default: "",
 						Type: []string{"string"},
 						Format: "",
 					},
@@ -2080,13 +3179,6 @@ func schema_pkg_apis_eventsource_v1alpha1_PubSubEventSource(ref common.Reference
 						Format: "",
 					},
 				},
-				"credentialsFile": {
-					SchemaProps: spec.SchemaProps{
-						Description: "CredentialsFile is the file that contains credentials to authenticate for GCP Deprecated: will be removed in v1.5, use CredentialSecret instead",
-						Type: []string{"string"},
-						Format: "",
-					},
-				},
 				"metadata": {
 					SchemaProps: spec.SchemaProps{
 						Description: "Metadata holds the user defined metadata which will passed along the event payload.",
@@ -2095,19 +3187,25 @@ func schema_pkg_apis_eventsource_v1alpha1_PubSubEventSource(ref common.Reference
 							Allows: true,
 							Schema: &spec.Schema{
 								SchemaProps: spec.SchemaProps{
-									Type: []string{"string"},
-									Format: "",
+									Default: "",
+									Type: []string{"string"},
+									Format: "",
 								},
 							},
 						},
 					},
 				},
+				"filter": {
+					SchemaProps: spec.SchemaProps{
+						Description: "Filter",
+						Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter"),
+					},
+				},
 			},
-			Required: []string{"credentialsFile"},
 		},
 	},
 	Dependencies: []string{
-		"k8s.io/api/core/v1.SecretKeySelector"},
+		"github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter", "k8s.io/api/core/v1.SecretKeySelector"},
 }
 }
@@ -2125,8 +3223,9 @@ func schema_pkg_apis_eventsource_v1alpha1_PulsarEventSource(ref common.Reference
 					Items: &spec.SchemaOrArray{
 						Schema: &spec.Schema{
 							SchemaProps: spec.SchemaProps{
-								Type: []string{"string"},
-								Format: "",
+								Default: "",
+								Type: []string{"string"},
+								Format: "",
 							},
 						},
 					},
@@ -2142,6 +3241,7 @@ func schema_pkg_apis_eventsource_v1alpha1_PulsarEventSource(ref common.Reference
 				"url": {
 					SchemaProps: spec.SchemaProps{
 						Description: "Configure the service URL for the Pulsar service.",
+						Default: "",
 						Type: []string{"string"},
 						Format: "",
 					},
@@ -2180,9 +3280,123 @@ func schema_pkg_apis_eventsource_v1alpha1_PulsarEventSource(ref common.Reference
 				},
 				"jsonBody": {
 					SchemaProps: spec.SchemaProps{
-						Description: "JSONBody specifies that all event body payload coming from this source will be JSON",
-						Type: []string{"boolean"},
-						Format: "",
+						Description: "JSONBody specifies that all event body payload coming from this source will be JSON",
+						Type: []string{"boolean"},
+						Format: "",
+					},
+				},
+				"metadata": {
+					SchemaProps: spec.SchemaProps{
+						Description: "Metadata holds the user defined metadata which will be passed along the event payload.",
+						Type: []string{"object"},
+						AdditionalProperties: &spec.SchemaOrBool{
+							Allows: true,
+							Schema: &spec.Schema{
+								SchemaProps: spec.SchemaProps{
+									Default: "",
+									Type: []string{"string"},
+									Format: "",
+								},
+							},
+						},
+					},
+				},
+				"authTokenSecret": {
+					SchemaProps: spec.SchemaProps{
+						Description: "Authentication token for the pulsar client. Either token or athenz can be set to use auth.",
+						Ref: ref("k8s.io/api/core/v1.SecretKeySelector"),
+					},
+				},
+				"filter": {
+					SchemaProps: spec.SchemaProps{
+						Description: "Filter",
+						Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter"),
+					},
+				},
+				"authAthenzParams": {
+					SchemaProps: spec.SchemaProps{
+						Description: "Authentication athenz parameters for the pulsar client. Refer https://github.com/apache/pulsar-client-go/blob/master/pulsar/auth/athenz.go Either token or athenz can be set to use auth.",
+						Type: []string{"object"},
+						AdditionalProperties: &spec.SchemaOrBool{
+							Allows: true,
+							Schema: &spec.Schema{
+								SchemaProps: spec.SchemaProps{
+									Default: "",
+									Type: []string{"string"},
+									Format: "",
+								},
+							},
+						},
+					},
+				},
+				"authAthenzSecret": {
+					SchemaProps: spec.SchemaProps{
+						Description: "Authentication athenz privateKey secret for the pulsar client. AuthAthenzSecret must be set if AuthAthenzParams is used.",
+						Ref: ref("k8s.io/api/core/v1.SecretKeySelector"),
+					},
+				},
+			},
+			Required: []string{"topics", "url"},
+		},
+	},
+	Dependencies: []string{
+		"github.com/argoproj/argo-events/pkg/apis/common.Backoff", "github.com/argoproj/argo-events/pkg/apis/common.TLSConfig", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter", "k8s.io/api/core/v1.SecretKeySelector"},
+	}
+}
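The Pulsar descriptions above encode two constraints: token and Athenz authentication are mutually exclusive, and authAthenzParams requires authAthenzSecret. An illustrative check of those rules; the helper is hypothetical, not the project's actual validation code:

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// validatePulsarAuth (hypothetical) enforces the constraints stated in the
// schema descriptions: token XOR athenz, and athenz params need the key secret.
func validatePulsarAuth(token *corev1.SecretKeySelector, athenzParams map[string]string, athenzKey *corev1.SecretKeySelector) error {
	if token != nil && len(athenzParams) > 0 {
		return fmt.Errorf("either token or athenz can be used for auth, not both")
	}
	if len(athenzParams) > 0 && athenzKey == nil {
		return fmt.Errorf("authAthenzSecret must be set when authAthenzParams is used")
	}
	return nil
}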
If not specified, default DB 0 will be used.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "channels": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "tls": { + SchemaProps: spec.SchemaProps{ + Description: "TLS configuration for the redis client.", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/common.TLSConfig"), }, }, "metadata": { @@ -2193,32 +3407,54 @@ func schema_pkg_apis_eventsource_v1alpha1_PulsarEventSource(ref common.Reference Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, }, }, + "filter": { + SchemaProps: spec.SchemaProps{ + Description: "Filter", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter"), + }, + }, + "jsonBody": { + SchemaProps: spec.SchemaProps{ + Description: "JSONBody specifies that all event body payload coming from this source will be JSON", + Type: []string{"boolean"}, + Format: "", + }, + }, + "username": { + SchemaProps: spec.SchemaProps{ + Description: "Username required for ACL style authentication if any.", + Type: []string{"string"}, + Format: "", + }, + }, }, - Required: []string{"topics", "url"}, + Required: []string{"hostAddress", "channels"}, }, }, Dependencies: []string{ - "github.com/argoproj/argo-events/pkg/apis/common.Backoff", "github.com/argoproj/argo-events/pkg/apis/common.TLSConfig", "k8s.io/api/core/v1.SecretKeySelector"}, + "github.com/argoproj/argo-events/pkg/apis/common.TLSConfig", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter", "k8s.io/api/core/v1.SecretKeySelector"}, } } -func schema_pkg_apis_eventsource_v1alpha1_RedisEventSource(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_pkg_apis_eventsource_v1alpha1_RedisStreamEventSource(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "RedisEventSource describes an event source for the Redis PubSub. More info at https://godoc.org/github.com/go-redis/redis#example-PubSub", + Description: "RedisStreamEventSource describes an event source for Redis streams (https://redis.io/topics/streams-intro)", Type: []string{"object"}, Properties: map[string]spec.Schema{ "hostAddress": { SchemaProps: spec.SchemaProps{ - Description: "HostAddress refers to the address of the Redis host/server", + Description: "HostAddress refers to the address of the Redis host/server (master instance)", + Default: "", Type: []string{"string"}, Format: "", }, @@ -2229,13 +3465,6 @@ func schema_pkg_apis_eventsource_v1alpha1_RedisEventSource(ref common.ReferenceC Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), }, }, - "namespace": { - SchemaProps: spec.SchemaProps{ - Description: "Namespace to use to retrieve the password from. It should only be specified if password is declared", - Type: []string{"string"}, - Format: "", - }, - }, "db": { SchemaProps: spec.SchemaProps{ Description: "DB to use. 
If not specified, default DB 0 will be used.", @@ -2243,19 +3472,35 @@ func schema_pkg_apis_eventsource_v1alpha1_RedisEventSource(ref common.ReferenceC Format: "int32", }, }, - "channels": { + "streams": { SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, + Description: "Streams to look for entries. XREADGROUP is used on all streams using a single consumer group.", + Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, }, }, + "maxMsgCountPerRead": { + SchemaProps: spec.SchemaProps{ + Description: "MaxMsgCountPerRead holds the maximum number of messages per stream that will be read in each XREADGROUP of all streams Example: if there are 2 streams and MaxMsgCountPerRead=10, then each XREADGROUP may read upto a total of 20 messages. Same as COUNT option in XREADGROUP(https://redis.io/topics/streams-intro). Defaults to 10", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "consumerGroup": { + SchemaProps: spec.SchemaProps{ + Description: "ConsumerGroup refers to the Redis stream consumer group that will be created on all redis streams. Messages are read through this group. Defaults to 'argo-events-cg'", + Type: []string{"string"}, + Format: "", + }, + }, "tls": { SchemaProps: spec.SchemaProps{ Description: "TLS configuration for the redis client.", @@ -2270,19 +3515,33 @@ func schema_pkg_apis_eventsource_v1alpha1_RedisEventSource(ref common.ReferenceC Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, }, }, + "filter": { + SchemaProps: spec.SchemaProps{ + Description: "Filter", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter"), + }, + }, + "username": { + SchemaProps: spec.SchemaProps{ + Description: "Username required for ACL style authentication if any.", + Type: []string{"string"}, + Format: "", + }, + }, }, - Required: []string{"hostAddress", "channels"}, + Required: []string{"hostAddress", "streams"}, }, }, Dependencies: []string{ - "github.com/argoproj/argo-events/pkg/apis/common.TLSConfig", "k8s.io/api/core/v1.SecretKeySelector"}, + "github.com/argoproj/argo-events/pkg/apis/common.TLSConfig", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter", "k8s.io/api/core/v1.SecretKeySelector"}, } } @@ -2296,6 +3555,7 @@ func schema_pkg_apis_eventsource_v1alpha1_ResourceEventSource(ref common.Referen "namespace": { SchemaProps: spec.SchemaProps{ Description: "Namespace where resource is deployed", + Default: "", Type: []string{"string"}, Format: "", }, @@ -2308,20 +3568,23 @@ func schema_pkg_apis_eventsource_v1alpha1_ResourceEventSource(ref common.Referen }, "group": { SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: "", + Type: []string{"string"}, + Format: "", }, }, "version": { SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: "", + Type: []string{"string"}, + Format: "", }, }, "resource": { SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: "", + Type: []string{"string"}, + Format: "", }, }, "eventTypes": { @@ -2331,8 +3594,9 @@ func schema_pkg_apis_eventsource_v1alpha1_ResourceEventSource(ref common.Referen Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - 
Format: "", + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, @@ -2346,15 +3610,24 @@ func schema_pkg_apis_eventsource_v1alpha1_ResourceEventSource(ref common.Referen Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, }, }, + "cluster": { + SchemaProps: spec.SchemaProps{ + Description: "Cluster from which events will be listened to", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, }, - Required: []string{"namespace", "group", "version", "resource", "eventTypes"}, + Required: []string{"namespace", "group", "version", "resource", "eventTypes", "cluster"}, }, }, Dependencies: []string{ @@ -2366,7 +3639,7 @@ func schema_pkg_apis_eventsource_v1alpha1_ResourceFilter(ref common.ReferenceCal return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "ResourceFilter contains K8 ObjectMeta information to further filter resource event objects", + Description: "ResourceFilter contains K8s ObjectMeta information to further filter resource event objects", Type: []string{"object"}, Properties: map[string]spec.Schema{ "prefix": { @@ -2378,12 +3651,13 @@ func schema_pkg_apis_eventsource_v1alpha1_ResourceFilter(ref common.ReferenceCal }, "labels": { SchemaProps: spec.SchemaProps{ - Description: "Labels provide listing options to K8s API to watch resource/s. Refer https://kubernetes.io/docs/concepts/overview/working-with-objects/label-selectors/ for more info.", + Description: "Labels provide listing options to K8s API to watch resource/s. Refer https://kubernetes.io/docs/concepts/overview/working-with-objects/label-selectors/ for more info. Unlike K8s field selector, multiple values are passed as comma separated values instead of list of values. Eg: value: value1,value2. 
Same as K8s label selector, operator \"=\", \"==\", \"!=\", \"exists\", \"!\", \"notin\", \"in\", \"gt\" and \"lt\" are supported", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.Selector"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.Selector"), }, }, }, @@ -2396,7 +3670,8 @@ func schema_pkg_apis_eventsource_v1alpha1_ResourceFilter(ref common.ReferenceCal Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.Selector"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.Selector"), }, }, }, @@ -2423,6 +3698,91 @@ func schema_pkg_apis_eventsource_v1alpha1_ResourceFilter(ref common.ReferenceCal } } +func schema_pkg_apis_eventsource_v1alpha1_SFTPEventSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "SFTPEventSource describes an event-source for sftp related events.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "eventType": { + SchemaProps: spec.SchemaProps{ + Description: "Type of file operations to watch Refer https://github.com/fsnotify/fsnotify/blob/master/fsnotify.go for more information", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "watchPathConfig": { + SchemaProps: spec.SchemaProps{ + Description: "WatchPathConfig contains configuration about the file path to watch", + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WatchPathConfig"), + }, + }, + "username": { + SchemaProps: spec.SchemaProps{ + Description: "Username required for authentication if any.", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "password": { + SchemaProps: spec.SchemaProps{ + Description: "Password required for authentication if any.", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "sshKeySecret": { + SchemaProps: spec.SchemaProps{ + Description: "SSHKeySecret refers to the secret that contains SSH key. 
Key needs to contain private key and public key.", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "address": { + SchemaProps: spec.SchemaProps{ + Description: "Address sftp address.", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Metadata holds the user defined metadata which will passed along the event payload.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "filter": { + SchemaProps: spec.SchemaProps{ + Description: "Filter", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter"), + }, + }, + "pollIntervalDuration": { + SchemaProps: spec.SchemaProps{ + Description: "PollIntervalDuration the interval at which to poll the SFTP server defaults to 10 seconds", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"eventType", "watchPathConfig"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WatchPathConfig", "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + func schema_pkg_apis_eventsource_v1alpha1_SNSEventSource(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -2439,25 +3799,27 @@ func schema_pkg_apis_eventsource_v1alpha1_SNSEventSource(ref common.ReferenceCal "topicArn": { SchemaProps: spec.SchemaProps{ Description: "TopicArn", + Default: "", Type: []string{"string"}, Format: "", }, }, "accessKey": { SchemaProps: spec.SchemaProps{ - Description: "AccessKey refers K8 secret containing aws access key", + Description: "AccessKey refers K8s secret containing aws access key", Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), }, }, "secretKey": { SchemaProps: spec.SchemaProps{ - Description: "SecretKey refers K8 secret containing aws secret key", + Description: "SecretKey refers K8s secret containing aws secret key", Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), }, }, "region": { SchemaProps: spec.SchemaProps{ Description: "Region is AWS region", + Default: "", Type: []string{"string"}, Format: "", }, @@ -2477,8 +3839,9 @@ func schema_pkg_apis_eventsource_v1alpha1_SNSEventSource(ref common.ReferenceCal Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, @@ -2491,12 +3854,26 @@ func schema_pkg_apis_eventsource_v1alpha1_SNSEventSource(ref common.ReferenceCal Format: "", }, }, + "filter": { + SchemaProps: spec.SchemaProps{ + Description: "Filter", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter"), + }, + }, + "endpoint": { + SchemaProps: spec.SchemaProps{ + Description: "Endpoint configures connection to a specific SNS endpoint instead of Amazons servers", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, }, Required: []string{"topicArn", "region"}, }, }, Dependencies: []string{ - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WebhookContext", "k8s.io/api/core/v1.SecretKeySelector"}, + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WebhookContext", 
"k8s.io/api/core/v1.SecretKeySelector"}, } } @@ -2509,19 +3886,20 @@ func schema_pkg_apis_eventsource_v1alpha1_SQSEventSource(ref common.ReferenceCal Properties: map[string]spec.Schema{ "accessKey": { SchemaProps: spec.SchemaProps{ - Description: "AccessKey refers K8 secret containing aws access key", + Description: "AccessKey refers K8s secret containing aws access key", Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), }, }, "secretKey": { SchemaProps: spec.SchemaProps{ - Description: "SecretKey refers K8 secret containing aws secret key", + Description: "SecretKey refers K8s secret containing aws secret key", Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), }, }, "region": { SchemaProps: spec.SchemaProps{ Description: "Region is AWS region", + Default: "", Type: []string{"string"}, Format: "", }, @@ -2529,6 +3907,7 @@ func schema_pkg_apis_eventsource_v1alpha1_SQSEventSource(ref common.ReferenceCal "queue": { SchemaProps: spec.SchemaProps{ Description: "Queue is AWS SQS queue to listen to for messages", + Default: "", Type: []string{"string"}, Format: "", }, @@ -2536,6 +3915,7 @@ func schema_pkg_apis_eventsource_v1alpha1_SQSEventSource(ref common.ReferenceCal "waitTimeSeconds": { SchemaProps: spec.SchemaProps{ Description: "WaitTimeSeconds is The duration (in seconds) for which the call waits for a message to arrive in the queue before returning.", + Default: 0, Type: []string{"integer"}, Format: "int64", }, @@ -2569,19 +3949,47 @@ func schema_pkg_apis_eventsource_v1alpha1_SQSEventSource(ref common.ReferenceCal Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, }, }, + "dlq": { + SchemaProps: spec.SchemaProps{ + Description: "DLQ specifies if a dead-letter queue is configured for messages that can't be processed successfully. If set to true, messages with invalid payload won't be acknowledged to allow to forward them farther to the dead-letter queue. The default value is false.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "filter": { + SchemaProps: spec.SchemaProps{ + Description: "Filter", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter"), + }, + }, + "endpoint": { + SchemaProps: spec.SchemaProps{ + Description: "Endpoint configures connection to a specific SQS endpoint instead of Amazons servers", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "sessionToken": { + SchemaProps: spec.SchemaProps{ + Description: "SessionToken refers to K8s secret containing AWS temporary credentials(STS) session token", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, }, Required: []string{"region", "queue", "waitTimeSeconds"}, }, }, Dependencies: []string{ - "k8s.io/api/core/v1.SecretKeySelector"}, + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter", "k8s.io/api/core/v1.SecretKeySelector"}, } } @@ -2595,13 +4003,14 @@ func schema_pkg_apis_eventsource_v1alpha1_Selector(ref common.ReferenceCallback) "key": { SchemaProps: spec.SchemaProps{ Description: "Key name", + Default: "", Type: []string{"string"}, Format: "", }, }, "operation": { SchemaProps: spec.SchemaProps{ - Description: "Supported operations like ==, !=, <=, >= etc. Defaults to ==. Refer https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors for more info.", + Description: "Supported operations like ==, != etc. Defaults to ==. 
Refer https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors for more info.", Type: []string{"string"}, Format: "", }, @@ -2609,6 +4018,7 @@ func schema_pkg_apis_eventsource_v1alpha1_Selector(ref common.ReferenceCallback) "value": { SchemaProps: spec.SchemaProps{ Description: "Value", + Default: "", Type: []string{"string"}, Format: "", }, @@ -2645,7 +4055,8 @@ func schema_pkg_apis_eventsource_v1alpha1_Service(ref common.ReferenceCallback) Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.ServicePort"), + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.ServicePort"), }, }, }, @@ -2699,18 +4110,25 @@ func schema_pkg_apis_eventsource_v1alpha1_SlackEventSource(ref common.ReferenceC Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, }, }, + "filter": { + SchemaProps: spec.SchemaProps{ + Description: "Filter", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter"), + }, + }, }, }, }, Dependencies: []string{ - "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WebhookContext", "k8s.io/api/core/v1.SecretKeySelector"}, + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter", "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.WebhookContext", "k8s.io/api/core/v1.SecretKeySelector"}, } } @@ -2733,8 +4151,9 @@ func schema_pkg_apis_eventsource_v1alpha1_StorageGridEventSource(ref common.Refe Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, @@ -2749,6 +4168,7 @@ func schema_pkg_apis_eventsource_v1alpha1_StorageGridEventSource(ref common.Refe "topicArn": { SchemaProps: spec.SchemaProps{ Description: "TopicArn", + Default: "", Type: []string{"string"}, Format: "", }, @@ -2756,6 +4176,7 @@ func schema_pkg_apis_eventsource_v1alpha1_StorageGridEventSource(ref common.Refe "bucket": { SchemaProps: spec.SchemaProps{ Description: "Name of the bucket to register notifications for.", + Default: "", Type: []string{"string"}, Format: "", }, @@ -2776,6 +4197,7 @@ func schema_pkg_apis_eventsource_v1alpha1_StorageGridEventSource(ref common.Refe "apiURL": { SchemaProps: spec.SchemaProps{ Description: "APIURL is the url of the storagegrid api.", + Default: "", Type: []string{"string"}, Format: "", }, @@ -2788,8 +4210,9 @@ func schema_pkg_apis_eventsource_v1alpha1_StorageGridEventSource(ref common.Refe Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, @@ -2813,14 +4236,16 @@ func schema_pkg_apis_eventsource_v1alpha1_StorageGridFilter(ref common.Reference Properties: map[string]spec.Schema{ "prefix": { SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: "", + Type: []string{"string"}, + Format: "", }, }, "suffix": { SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, @@ -2863,8 +4288,9 @@ func schema_pkg_apis_eventsource_v1alpha1_StripeEventSource(ref common.Reference Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: "", + Type: 
[]string{"string"}, + Format: "", }, }, }, @@ -2878,8 +4304,9 @@ func schema_pkg_apis_eventsource_v1alpha1_StripeEventSource(ref common.Reference Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, @@ -2932,7 +4359,8 @@ func schema_pkg_apis_eventsource_v1alpha1_Template(ref common.ReferenceCallback) Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.Volume"), + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.Volume"), }, }, }, @@ -2957,7 +4385,8 @@ func schema_pkg_apis_eventsource_v1alpha1_Template(ref common.ReferenceCallback) Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.Toleration"), + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.Toleration"), }, }, }, @@ -2971,8 +4400,9 @@ func schema_pkg_apis_eventsource_v1alpha1_Template(ref common.ReferenceCallback) Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, @@ -2991,7 +4421,8 @@ func schema_pkg_apis_eventsource_v1alpha1_Template(ref common.ReferenceCallback) Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), }, }, }, @@ -3028,6 +4459,7 @@ func schema_pkg_apis_eventsource_v1alpha1_WatchPathConfig(ref common.ReferenceCa "directory": { SchemaProps: spec.SchemaProps{ Description: "Directory to watch for events", + Default: "", Type: []string{"string"}, Format: "", }, @@ -3063,6 +4495,7 @@ func schema_pkg_apis_eventsource_v1alpha1_WebhookContext(ref common.ReferenceCal "endpoint": { SchemaProps: spec.SchemaProps{ Description: "REST API endpoint", + Default: "", Type: []string{"string"}, Format: "", }, @@ -3070,6 +4503,7 @@ func schema_pkg_apis_eventsource_v1alpha1_WebhookContext(ref common.ReferenceCal "method": { SchemaProps: spec.SchemaProps{ Description: "Method is HTTP request method that indicates the desired action to be performed for a given resource. 
See RFC7231 Hypertext Transfer Protocol (HTTP/1.1): Semantics and Content", + Default: "", Type: []string{"string"}, Format: "", }, @@ -3077,6 +4511,7 @@ func schema_pkg_apis_eventsource_v1alpha1_WebhookContext(ref common.ReferenceCal "port": { SchemaProps: spec.SchemaProps{ Description: "Port on which HTTP server is listening for incoming events.", + Default: "", Type: []string{"string"}, Format: "", }, @@ -3084,6 +4519,7 @@ func schema_pkg_apis_eventsource_v1alpha1_WebhookContext(ref common.ReferenceCal "url": { SchemaProps: spec.SchemaProps{ Description: "URL is the url of the server.", + Default: "", Type: []string{"string"}, Format: "", }, @@ -3108,8 +4544,9 @@ func schema_pkg_apis_eventsource_v1alpha1_WebhookContext(ref common.ReferenceCal Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, @@ -3121,25 +4558,113 @@ func schema_pkg_apis_eventsource_v1alpha1_WebhookContext(ref common.ReferenceCal Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), }, }, - "serverCertPath": { + "maxPayloadSize": { + SchemaProps: spec.SchemaProps{ + Description: "MaxPayloadSize is the maximum webhook payload size that the server will accept. Requests exceeding that limit will be rejected with \"request too large\" response. Default value: 1048576 (1MB).", + Type: []string{"integer"}, + Format: "int64", + }, + }, + }, + Required: []string{"endpoint", "method", "port", "url"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_eventsource_v1alpha1_WebhookEventSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "CalendarEventSource describes an HTTP based EventSource", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "endpoint": { + SchemaProps: spec.SchemaProps{ + Description: "REST API endpoint", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "method": { + SchemaProps: spec.SchemaProps{ + Description: "Method is HTTP request method that indicates the desired action to be performed for a given resource. 
See RFC7231 Hypertext Transfer Protocol (HTTP/1.1): Semantics and Content", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "port": { SchemaProps: spec.SchemaProps{ - Description: "DeprecatedServerCertPath refers the file that contains the cert.", + Description: "Port on which HTTP server is listening for incoming events.", + Default: "", Type: []string{"string"}, Format: "", }, }, - "serverKeyPath": { + "url": { SchemaProps: spec.SchemaProps{ - Description: "DeprecatedServerKeyPath refers the file that contains private key", + Description: "URL is the url of the server.", + Default: "", Type: []string{"string"}, Format: "", }, }, + "serverCertSecret": { + SchemaProps: spec.SchemaProps{ + Description: "ServerCertPath refers the file that contains the cert.", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "serverKeySecret": { + SchemaProps: spec.SchemaProps{ + Description: "ServerKeyPath refers the file that contains private key", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Metadata holds the user defined metadata which will passed along the event payload.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "authSecret": { + SchemaProps: spec.SchemaProps{ + Description: "AuthSecret holds a secret selector that contains a bearer token for authentication", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "maxPayloadSize": { + SchemaProps: spec.SchemaProps{ + Description: "MaxPayloadSize is the maximum webhook payload size that the server will accept. Requests exceeding that limit will be rejected with \"request too large\" response. 
Default value: 1048576 (1MB).", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "filter": { + SchemaProps: spec.SchemaProps{ + Description: "Filter", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter"), + }, + }, }, Required: []string{"endpoint", "method", "port", "url"}, }, }, Dependencies: []string{ - "k8s.io/api/core/v1.SecretKeySelector"}, + "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1.EventSourceFilter", "k8s.io/api/core/v1.SecretKeySelector"}, } } diff --git a/pkg/apis/eventsource/v1alpha1/register.go b/pkg/apis/eventsource/v1alpha1/register.go index 3efef0e67a..e42671c04f 100644 --- a/pkg/apis/eventsource/v1alpha1/register.go +++ b/pkg/apis/eventsource/v1alpha1/register.go @@ -33,8 +33,8 @@ var ( // SchemeGroupVersion is a group version used to register these objects SchemeGroupVersion = schema.GroupVersion{Group: eventsource.Group, Version: "v1alpha1"} - // SchemaGroupVersionKind is a group version kind used to attach owner references to gateway-controller - SchemaGroupVersionKind = schema.GroupVersionKind{Group: eventsource.Group, Version: "v1alpha1", Kind: eventsource.Kind} + // SchemaGroupVersionKind is a group version kind used to attach owner references to eventsource-controller + SchemaGroupVersionKind = SchemeGroupVersion.WithKind(eventsource.Kind) // SchemeBuilder is used to add go types to the GroupVersionKind scheme SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) diff --git a/pkg/apis/eventsource/v1alpha1/types.go b/pkg/apis/eventsource/v1alpha1/types.go index 274bbed1cf..f4128d514d 100644 --- a/pkg/apis/eventsource/v1alpha1/types.go +++ b/pkg/apis/eventsource/v1alpha1/types.go @@ -17,8 +17,6 @@ limitations under the License. package v1alpha1 import ( - "encoding/json" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -48,6 +46,10 @@ type EventSourceList struct { Items []EventSource `json:"items" protobuf:"bytes,2,rep,name=items"` } +type EventSourceFilter struct { + Expression string `json:"expression,omitempty" protobuf:"bytes,1,opt,name=expression"` +} + // EventSourceSpec refers to specification of event-source resource type EventSourceSpec struct { // EventBusName references to a EventBus name. 
By default the value is "default" @@ -58,71 +60,79 @@ type EventSourceSpec struct { // Service is the specifications of the service to expose the event source // +optional Service *Service `json:"service,omitempty" protobuf:"bytes,3,opt,name=service"` - // DeprecatedReplica is the event source deployment replicas - // Deprecated: use replicas instead, will be removed in v1.5 - DeprecatedReplica *int32 `json:"replica,omitempty" protobuf:"varint,4,opt,name=replica"` - // Minio event sources - Minio map[string]apicommon.S3Artifact `json:"minio,omitempty" protobuf:"bytes,5,rep,name=minio"` + Minio map[string]apicommon.S3Artifact `json:"minio,omitempty" protobuf:"bytes,4,rep,name=minio"` // Calendar event sources - Calendar map[string]CalendarEventSource `json:"calendar,omitempty" protobuf:"bytes,6,rep,name=calendar"` + Calendar map[string]CalendarEventSource `json:"calendar,omitempty" protobuf:"bytes,5,rep,name=calendar"` // File event sources - File map[string]FileEventSource `json:"file,omitempty" protobuf:"bytes,7,rep,name=file"` + File map[string]FileEventSource `json:"file,omitempty" protobuf:"bytes,6,rep,name=file"` // Resource event sources - Resource map[string]ResourceEventSource `json:"resource,omitempty" protobuf:"bytes,8,rep,name=resource"` + Resource map[string]ResourceEventSource `json:"resource,omitempty" protobuf:"bytes,7,rep,name=resource"` // Webhook event sources - Webhook map[string]WebhookContext `json:"webhook,omitempty" protobuf:"bytes,9,rep,name=webhook"` + Webhook map[string]WebhookEventSource `json:"webhook,omitempty" protobuf:"bytes,8,rep,name=webhook"` // AMQP event sources - AMQP map[string]AMQPEventSource `json:"amqp,omitempty" protobuf:"bytes,10,rep,name=amqp"` + AMQP map[string]AMQPEventSource `json:"amqp,omitempty" protobuf:"bytes,9,rep,name=amqp"` // Kafka event sources - Kafka map[string]KafkaEventSource `json:"kafka,omitempty" protobuf:"bytes,11,rep,name=kafka"` + Kafka map[string]KafkaEventSource `json:"kafka,omitempty" protobuf:"bytes,10,rep,name=kafka"` // MQTT event sources - MQTT map[string]MQTTEventSource `json:"mqtt,omitempty" protobuf:"bytes,12,rep,name=mqtt"` + MQTT map[string]MQTTEventSource `json:"mqtt,omitempty" protobuf:"bytes,11,rep,name=mqtt"` // NATS event sources - NATS map[string]NATSEventsSource `json:"nats,omitempty" protobuf:"bytes,13,rep,name=nats"` + NATS map[string]NATSEventsSource `json:"nats,omitempty" protobuf:"bytes,12,rep,name=nats"` // SNS event sources - SNS map[string]SNSEventSource `json:"sns,omitempty" protobuf:"bytes,14,rep,name=sns"` + SNS map[string]SNSEventSource `json:"sns,omitempty" protobuf:"bytes,13,rep,name=sns"` // SQS event sources - SQS map[string]SQSEventSource `json:"sqs,omitempty" protobuf:"bytes,15,rep,name=sqs"` + SQS map[string]SQSEventSource `json:"sqs,omitempty" protobuf:"bytes,14,rep,name=sqs"` // PubSub event sources - PubSub map[string]PubSubEventSource `json:"pubSub,omitempty" protobuf:"bytes,16,rep,name=pubSub"` + PubSub map[string]PubSubEventSource `json:"pubSub,omitempty" protobuf:"bytes,15,rep,name=pubSub"` // Github event sources - Github map[string]GithubEventSource `json:"github,omitempty" protobuf:"bytes,17,rep,name=github"` + Github map[string]GithubEventSource `json:"github,omitempty" protobuf:"bytes,16,rep,name=github"` // Gitlab event sources - Gitlab map[string]GitlabEventSource `json:"gitlab,omitempty" protobuf:"bytes,18,rep,name=gitlab"` + Gitlab map[string]GitlabEventSource `json:"gitlab,omitempty" protobuf:"bytes,17,rep,name=gitlab"` // HDFS event sources - HDFS 
map[string]HDFSEventSource `json:"hdfs,omitempty" protobuf:"bytes,19,rep,name=hdfs"` + HDFS map[string]HDFSEventSource `json:"hdfs,omitempty" protobuf:"bytes,18,rep,name=hdfs"` // Slack event sources - Slack map[string]SlackEventSource `json:"slack,omitempty" protobuf:"bytes,20,rep,name=slack"` + Slack map[string]SlackEventSource `json:"slack,omitempty" protobuf:"bytes,19,rep,name=slack"` // StorageGrid event sources - StorageGrid map[string]StorageGridEventSource `json:"storageGrid,omitempty" protobuf:"bytes,21,rep,name=storageGrid"` + StorageGrid map[string]StorageGridEventSource `json:"storageGrid,omitempty" protobuf:"bytes,20,rep,name=storageGrid"` // AzureEventsHub event sources - AzureEventsHub map[string]AzureEventsHubEventSource `json:"azureEventsHub,omitempty" protobuf:"bytes,22,rep,name=azureEventsHub"` + AzureEventsHub map[string]AzureEventsHubEventSource `json:"azureEventsHub,omitempty" protobuf:"bytes,21,rep,name=azureEventsHub"` // Stripe event sources - Stripe map[string]StripeEventSource `json:"stripe,omitempty" protobuf:"bytes,23,rep,name=stripe"` + Stripe map[string]StripeEventSource `json:"stripe,omitempty" protobuf:"bytes,22,rep,name=stripe"` // Emitter event source - Emitter map[string]EmitterEventSource `json:"emitter,omitempty" protobuf:"bytes,24,rep,name=emitter"` + Emitter map[string]EmitterEventSource `json:"emitter,omitempty" protobuf:"bytes,23,rep,name=emitter"` // Redis event source - Redis map[string]RedisEventSource `json:"redis,omitempty" protobuf:"bytes,25,rep,name=redis"` + Redis map[string]RedisEventSource `json:"redis,omitempty" protobuf:"bytes,24,rep,name=redis"` // NSQ event source - NSQ map[string]NSQEventSource `json:"nsq,omitempty" protobuf:"bytes,26,rep,name=nsq"` + NSQ map[string]NSQEventSource `json:"nsq,omitempty" protobuf:"bytes,25,rep,name=nsq"` // Pulsar event source - Pulsar map[string]PulsarEventSource `json:"pulsar,omitempty" protobuf:"bytes,27,opt,name=pulsar"` + Pulsar map[string]PulsarEventSource `json:"pulsar,omitempty" protobuf:"bytes,26,opt,name=pulsar"` // Generic event source - Generic map[string]GenericEventSource `json:"generic,omitempty" protobuf:"bytes,28,rep,name=generic"` + Generic map[string]GenericEventSource `json:"generic,omitempty" protobuf:"bytes,27,rep,name=generic"` // Replicas is the event source deployment replicas - Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,29,opt,name=replicas"` + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,28,opt,name=replicas"` + // Bitbucket Server event sources + BitbucketServer map[string]BitbucketServerEventSource `json:"bitbucketserver,omitempty" protobuf:"bytes,29,rep,name=bitbucketserver"` + // Bitbucket event sources + Bitbucket map[string]BitbucketEventSource `json:"bitbucket,omitempty" protobuf:"bytes,30,rep,name=bitbucket"` + // Redis stream source + RedisStream map[string]RedisStreamEventSource `json:"redisStream,omitempty" protobuf:"bytes,31,rep,name=redisStream"` + // Azure Service Bus event source + AzureServiceBus map[string]AzureServiceBusEventSource `json:"azureServiceBus,omitempty" protobuf:"bytes,32,rep,name=azureServiceBus"` + // AzureQueueStorage event source + AzureQueueStorage map[string]AzureQueueStorageEventSource `json:"azureQueueStorage,omitempty" protobuf:"bytes,33,rep,name=azureQueueStorage"` + // SFTP event sources + SFTP map[string]SFTPEventSource `json:"sftp,omitempty" protobuf:"bytes,34,rep,name=sftp"` + // Gerrit event source + Gerrit map[string]GerritEventSource `json:"gerrit,omitempty" protobuf:"bytes,35,rep,name=gerrit"` } 
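For context on how these spec fields are consumed: each entry in one of the maps above becomes a named event stream in an EventSource manifest. A minimal sketch, assuming the CRD conventions in this diff (the name, port, url, and filter expression are hypothetical, and the expression is assumed to be an expr-style expression evaluated against the event body):

apiVersion: argoproj.io/v1alpha1
kind: EventSource
metadata:
  name: webhook-demo                # hypothetical name
spec:
  webhook:
    example:                        # map key becomes the event name
      endpoint: /example            # WebhookContext.Endpoint (required)
      method: POST                  # WebhookContext.Method (required)
      port: "12000"                 # WebhookContext.Port is a string (required)
      url: http://localhost:12000   # required by the schema above; hypothetical value
      filter:                       # EventSourceFilter introduced in this change
        expression: 'body.action == "opened"'   # assumed expr-style syntax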
func (e EventSourceSpec) GetReplicas() int32 { - if e.Replicas == nil && e.DeprecatedReplica == nil { + if e.Replicas == nil { return 1 } var replicas int32 if e.Replicas != nil { replicas = *e.Replicas - } else { - replicas = *e.DeprecatedReplica } if replicas < 1 { replicas = 1 @@ -208,28 +218,36 @@ type Service struct { ClusterIP string `json:"clusterIP,omitempty" protobuf:"bytes,2,opt,name=clusterIP"` } +// WebhookEventSource describes an HTTP based EventSource +type WebhookEventSource struct { + WebhookContext `json:",inline" protobuf:"bytes,1,opt,name=webhookContext"` + // Filter + // +optional + Filter *EventSourceFilter `json:"filter,omitempty" protobuf:"bytes,2,opt,name=filter"` +} + // CalendarEventSource describes a time based dependency. One of the fields (schedule, interval, or recurrence) must be passed. // Schedule takes precedence over interval; interval takes precedence over recurrence type CalendarEventSource struct { // Schedule is a cron-like expression. For reference, see: https://en.wikipedia.org/wiki/Cron + // +optional Schedule string `json:"schedule" protobuf:"bytes,1,opt,name=schedule"` // Interval is a string that describes an interval duration, e.g. 1s, 30m, 2h... + // +optional Interval string `json:"interval" protobuf:"bytes,2,opt,name=interval"` // ExclusionDates defines the list of DATE-TIME exceptions for recurring events. - ExclusionDates []string `json:"exclusionDates,omitempty" protobuf:"bytes,3,rep,name=exclusionDates"` // Timezone in which to run the schedule // +optional Timezone string `json:"timezone,omitempty" protobuf:"bytes,4,opt,name=timezone"` - // UserPayload will be sent to sensor as extra data once the event is triggered - // +optional - // Deprecated: will be removed in v1.5. Please use Metadata instead. - UserPayload json.RawMessage `json:"userPayload,omitempty" protobuf:"bytes,5,opt,name=userPayload,casttype=encoding/json.RawMessage"` // Metadata holds the user defined metadata which will be passed along the event payload. // +optional - Metadata map[string]string `json:"metadata,omitempty" protobuf:"bytes,6,rep,name=metadata"` + Metadata map[string]string `json:"metadata,omitempty" protobuf:"bytes,5,rep,name=metadata"` // Persistence holds the configuration for event persistence - Persistence *EventPersistence `json:"persistence,omitempty" protobuf:"bytes,7,opt,name=persistence"` + Persistence *EventPersistence `json:"persistence,omitempty" protobuf:"bytes,6,opt,name=persistence"` + // Filter + // +optional + Filter *EventSourceFilter `json:"filter,omitempty" protobuf:"bytes,8,opt,name=filter"` } type EventPersistence struct { @@ -269,6 +287,36 @@ type FileEventSource struct { // Metadata holds the user defined metadata which will be passed along the event payload. // +optional Metadata map[string]string `json:"metadata,omitempty" protobuf:"bytes,4,rep,name=metadata"` + // Filter + // +optional + Filter *EventSourceFilter `json:"filter,omitempty" protobuf:"bytes,5,opt,name=filter"` +} + +// SFTPEventSource describes an event-source for sftp related events. +type SFTPEventSource struct { + // Type of file operations to watch + // Refer https://github.com/fsnotify/fsnotify/blob/master/fsnotify.go for more information + EventType string `json:"eventType" protobuf:"bytes,1,opt,name=eventType"` + // WatchPathConfig contains configuration about the file path to watch + WatchPathConfig WatchPathConfig `json:"watchPathConfig" protobuf:"bytes,2,opt,name=watchPathConfig"` + // Username required for authentication if any.
+ Username *corev1.SecretKeySelector `json:"username,omitempty" protobuf:"bytes,3,opt,name=username"` + // Password required for authentication if any. + Password *corev1.SecretKeySelector `json:"password,omitempty" protobuf:"bytes,4,opt,name=password"` + // SSHKeySecret refers to the secret that contains SSH key. Key needs to contain private key and public key. + SSHKeySecret *corev1.SecretKeySelector `json:"sshKeySecret,omitempty" protobuf:"bytes,5,opt,name=sshKeySecret"` + // Address is the sftp server address. + Address *corev1.SecretKeySelector `json:"address,omitempty" protobuf:"bytes,6,opt,name=address"` + // Metadata holds the user defined metadata which will be passed along the event payload. + // +optional + Metadata map[string]string `json:"metadata,omitempty" protobuf:"bytes,7,rep,name=metadata"` + // Filter + // +optional + Filter *EventSourceFilter `json:"filter,omitempty" protobuf:"bytes,8,opt,name=filter"` + // PollIntervalDuration is the interval at which to poll the SFTP server. + // Defaults to 10 seconds. + // +optional + PollIntervalDuration string `json:"pollIntervalDuration" protobuf:"varint,9,opt,name=pollIntervalDuration"` } // ResourceEventType is the type of event for the K8s resource mutation @@ -297,15 +345,21 @@ type ResourceEventSource struct { // Metadata holds the user defined metadata which will be passed along the event payload. // +optional Metadata map[string]string `json:"metadata,omitempty" protobuf:"bytes,5,rep,name=metadata"` + // Cluster from which events will be listened to + Cluster string `json:"cluster" protobuf:"bytes,6,opt,name=cluster"` } -// ResourceFilter contains K8 ObjectMeta information to further filter resource event objects +// ResourceFilter contains K8s ObjectMeta information to further filter resource event objects type ResourceFilter struct { // Prefix filter is applied on the resource name. // +optional Prefix string `json:"prefix,omitempty" protobuf:"bytes,1,opt,name=prefix"` // Labels provide listing options to K8s API to watch resource/s. // Refer https://kubernetes.io/docs/concepts/overview/working-with-objects/label-selectors/ for more info. + // Unlike K8s field selector, multiple values are passed as comma separated values instead of list of values. + // Eg: value: value1,value2. + // Same as K8s label selector, operator "=", "==", "!=", "exists", "!", "notin", "in", "gt" and "lt" + // are supported // +optional Labels []Selector `json:"labels,omitempty" protobuf:"bytes,2,rep,name=labels"` // Fields provide field filters similar to K8s field selector @@ -327,7 +381,7 @@ type Selector struct { // Key name Key string `json:"key" protobuf:"bytes,1,opt,name=key"` - // Supported operations like ==, !=, <=, >= etc. + // Supported operations like ==, != etc. // Defaults to ==. // Refer https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors for more info.
// +optional @@ -338,8 +392,9 @@ type Selector struct { // AMQPEventSource refers to an event-source for AMQP stream events type AMQPEventSource struct { + // URL for rabbitmq service - URL string `json:"url" protobuf:"bytes,1,opt,name=url"` + URL string `json:"url,omitempty" protobuf:"bytes,1,opt,name=url"` // ExchangeName is the exchange name // For more information, visit https://www.rabbitmq.com/tutorials/amqp-concepts.html ExchangeName string `json:"exchangeName" protobuf:"bytes,2,opt,name=exchangeName"` @@ -361,27 +416,32 @@ type AMQPEventSource struct { // +optional Metadata map[string]string `json:"metadata,omitempty" protobuf:"bytes,8,rep,name=metadata"` // ExchangeDeclare holds the configuration for the exchange on the server - // For more information, visit https://godoc.org/github.com/streadway/amqp#Channel.ExchangeDeclare + // For more information, visit https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.ExchangeDeclare // +optional ExchangeDeclare *AMQPExchangeDeclareConfig `json:"exchangeDeclare,omitempty" protobuf:"bytes,9,opt,name=exchangeDeclare"` // QueueDeclare holds the configuration of a queue to hold messages and deliver to consumers. // Declaring creates a queue if it doesn't already exist, or ensures that an existing queue matches // the same parameters - // For more information, visit https://godoc.org/github.com/streadway/amqp#Channel.QueueDeclare + // For more information, visit https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.QueueDeclare // +optional QueueDeclare *AMQPQueueDeclareConfig `json:"queueDeclare,omitempty" protobuf:"bytes,10,opt,name=queueDeclare"` // QueueBind holds the configuration that binds an exchange to a queue so that publishings to the // exchange will be routed to the queue when the publishing routing key matches the binding routing key - // For more information, visit https://godoc.org/github.com/streadway/amqp#Channel.QueueBind + // For more information, visit https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.QueueBind // +optional QueueBind *AMQPQueueBindConfig `json:"queueBind,omitempty" protobuf:"bytes,11,opt,name=queueBind"` // Consume holds the configuration to immediately start delivering queued messages - // For more information, visit https://godoc.org/github.com/streadway/amqp#Channel.Consume + // For more information, visit https://pkg.go.dev/github.com/rabbitmq/amqp091-go#Channel.Consume // +optional Consume *AMQPConsumeConfig `json:"consume,omitempty" protobuf:"bytes,12,opt,name=consume"` // Auth hosts secret selectors for username and password // +optional Auth *apicommon.BasicAuth `json:"auth,omitempty" protobuf:"bytes,13,opt,name=auth"` + // URLSecret is a secret reference for the rabbitmq service URL + URLSecret *corev1.SecretKeySelector `json:"urlSecret,omitempty" protobuf:"bytes,14,opt,name=urlSecret"` + // Filter + // +optional + Filter *EventSourceFilter `json:"filter,omitempty" protobuf:"bytes,15,opt,name=filter"` } // AMQPExchangeDeclareConfig holds the configuration for the exchange on the server @@ -422,6 +482,9 @@ type AMQPQueueDeclareConfig struct { // NoWait when true, the queue is assumed to be declared on the server // +optional NoWait bool `json:"noWait,omitempty" protobuf:"varint,5,opt,name=noWait"` + // Arguments of a queue (also known as "x-arguments") used for optional features and plugins + // +optional + Arguments string `json:"arguments,omitempty" protobuf:"bytes,6,opt,name=arguments"` } // AMQPQueueBindConfig holds the configuration that binds an exchange to a queue so that
publishings to the @@ -459,6 +522,7 @@ type KafkaEventSource struct { // URL to kafka cluster, multiple URLs separated by comma URL string `json:"url" protobuf:"bytes,1,opt,name=url"` // Partition name + // +optional Partition string `json:"partition" protobuf:"bytes,2,opt,name=partition"` // Topic name Topic string `json:"topic" protobuf:"bytes,3,opt,name=topic"` @@ -489,6 +553,21 @@ type KafkaEventSource struct { // SASL configuration for the kafka client // +optional SASL *apicommon.SASLConfig `json:"sasl,omitempty" protobuf:"bytes,11,opt,name=sasl"` + // Filter + // +optional + Filter *EventSourceFilter `json:"filter,omitempty" protobuf:"bytes,12,opt,name=filter"` + // Yaml format Sarama config for Kafka connection. + // It follows the struct of sarama.Config. See https://github.com/IBM/sarama/blob/main/config.go + // e.g. + // + // consumer: + // fetch: + // min: 1 + // net: + // MaxOpenRequests: 5 + // + // +optional + Config string `json:"config,omitempty" protobuf:"bytes,13,opt,name=config"` } type KafkaConsumerGroup struct { @@ -522,6 +601,12 @@ type MQTTEventSource struct { // Metadata holds the user defined metadata which will passed along the event payload. // +optional Metadata map[string]string `json:"metadata,omitempty" protobuf:"bytes,7,rep,name=metadata"` + // Filter + // +optional + Filter *EventSourceFilter `json:"filter,omitempty" protobuf:"bytes,8,opt,name=filter"` + // Auth hosts secret selectors for username and password + // +optional + Auth *apicommon.BasicAuth `json:"auth,omitempty" protobuf:"bytes,9,opt,name=auth"` } // NATSEventsSource refers to event-source for NATS related events @@ -545,6 +630,13 @@ type NATSEventsSource struct { // Auth information // +optional Auth *NATSAuth `json:"auth,omitempty" protobuf:"bytes,7,opt,name=auth"` + // Filter + // +optional + Filter *EventSourceFilter `json:"filter,omitempty" protobuf:"bytes,8,opt,name=filter"` + // Queue is the name of the queue group to subscribe as if specified. Uses QueueSubscribe + // logic to subscribe as queue group. If the queue is empty, uses default Subscribe logic. 
+ // +optional + Queue *string `json:"queue" protobuf:"bytes,9,opt,name=queue"` } // NATSAuth refers to the auth info for NATS EventSource @@ -569,9 +661,9 @@ type SNSEventSource struct { Webhook *WebhookContext `json:"webhook,omitempty" protobuf:"bytes,1,opt,name=webhook"` // TopicArn TopicArn string `json:"topicArn" protobuf:"bytes,2,opt,name=topicArn"` - // AccessKey refers K8 secret containing aws access key + // AccessKey refers K8s secret containing aws access key AccessKey *corev1.SecretKeySelector `json:"accessKey,omitempty" protobuf:"bytes,3,opt,name=accessKey"` - // SecretKey refers K8 secret containing aws secret key + // SecretKey refers K8s secret containing aws secret key SecretKey *corev1.SecretKeySelector `json:"secretKey,omitempty" protobuf:"bytes,4,opt,name=secretKey"` // Region is AWS region Region string `json:"region" protobuf:"bytes,5,opt,name=region"` @@ -584,13 +676,19 @@ type SNSEventSource struct { // ValidateSignature is boolean that can be set to true for SNS signature verification // +optional ValidateSignature bool `json:"validateSignature,omitempty" protobuf:"varint,8,opt,name=validateSignature"` + // Filter + // +optional + Filter *EventSourceFilter `json:"filter,omitempty" protobuf:"bytes,9,opt,name=filter"` + // Endpoint configures connection to a specific SNS endpoint instead of Amazons servers + // +optional + Endpoint string `json:"endpoint" protobuf:"bytes,10,opt,name=endpoint"` } // SQSEventSource refers to event-source for AWS SQS related events type SQSEventSource struct { - // AccessKey refers K8 secret containing aws access key + // AccessKey refers K8s secret containing aws access key AccessKey *corev1.SecretKeySelector `json:"accessKey,omitempty" protobuf:"bytes,1,opt,name=accessKey"` - // SecretKey refers K8 secret containing aws secret key + // SecretKey refers K8s secret containing aws secret key SecretKey *corev1.SecretKeySelector `json:"secretKey,omitempty" protobuf:"bytes,2,opt,name=secretKey"` // Region is AWS region Region string `json:"region" protobuf:"bytes,3,opt,name=region"` @@ -612,6 +710,20 @@ type SQSEventSource struct { // Metadata holds the user defined metadata which will passed along the event payload. // +optional Metadata map[string]string `json:"metadata,omitempty" protobuf:"bytes,9,rep,name=metadata"` + // DLQ specifies if a dead-letter queue is configured for messages that can't be processed successfully. + // If set to true, messages with invalid payload won't be acknowledged to allow to forward them farther to the dead-letter queue. + // The default value is false. + // +optional + DLQ bool `json:"dlq,omitempty" protobuf:"varint,10,opt,name=dlq"` + // Filter + // +optional + Filter *EventSourceFilter `json:"filter,omitempty" protobuf:"bytes,11,opt,name=filter"` + // Endpoint configures connection to a specific SQS endpoint instead of Amazons servers + // +optional + Endpoint string `json:"endpoint" protobuf:"bytes,12,opt,name=endpoint"` + // SessionToken refers to K8s secret containing AWS temporary credentials(STS) session token + // +optional + SessionToken *corev1.SecretKeySelector `json:"sessionToken,omitempty" protobuf:"bytes,13,opt,name=sessionToken"` } // PubSubEventSource refers to event-source for GCP PubSub related events. 
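The SQS hunk above adds dlq, endpoint, and sessionToken alongside the existing required fields; in manifest form that looks roughly like this (a sketch with hypothetical names; the LocalStack-style endpoint and the secret are illustrative only):

apiVersion: argoproj.io/v1alpha1
kind: EventSource
metadata:
  name: sqs-demo                     # hypothetical name
spec:
  sqs:
    example:
      region: us-east-1
      queue: test-queue              # hypothetical queue name
      waitTimeSeconds: 20            # long-polling wait, required by the schema
      dlq: true                      # don't ack bad messages; let SQS forward them to the DLQ
      endpoint: http://localstack:4566   # hypothetical non-AWS endpoint
      sessionToken:                  # corev1.SecretKeySelector for an STS session token
        name: aws-creds              # hypothetical secret name
        key: sessionToken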
@@ -649,36 +761,88 @@ type PubSubEventSource struct { // source will be JSON // +optional JSONBody bool `json:"jsonBody,omitempty" protobuf:"varint,7,opt,name=jsonBody"` - // CredentialsFile is the file that contains credentials to authenticate for GCP - // Deprecated: will be removed in v1.5, use CredentialSecret instead - DeprecatedCredentialsFile string `json:"credentialsFile" protobuf:"bytes,8,opt,name=credentialsFile"` // Metadata holds the user defined metadata which will passed along the event payload. // +optional - Metadata map[string]string `json:"metadata,omitempty" protobuf:"bytes,9,rep,name=metadata"` + Metadata map[string]string `json:"metadata,omitempty" protobuf:"bytes,8,rep,name=metadata"` + // Filter + // +optional + Filter *EventSourceFilter `json:"filter,omitempty" protobuf:"bytes,9,opt,name=filter"` +} + +// GerritEventSource refers to event-source related to gerrit events +type GerritEventSource struct { + // Webhook holds configuration to run a http server + Webhook *WebhookContext `json:"webhook,omitempty" protobuf:"bytes,1,opt,name=webhook"` + // HookName is the name of the webhook + HookName string `json:"hookName" protobuf:"bytes,2,opt,name=hookName"` + // Events are gerrit event to listen to. + // Refer https://gerrit-review.googlesource.com/Documentation/cmd-stream-events.html#events + Events []string `json:"events" protobuf:"bytes,3,opt,name=events"` + // Auth hosts secret selectors for username and password + // +optional + Auth *apicommon.BasicAuth `json:"auth,omitempty" protobuf:"bytes,4,opt,name=auth"` + // GerritBaseURL is the base URL for API requests to a custom endpoint + GerritBaseURL string `json:"gerritBaseURL" protobuf:"bytes,5,opt,name=gerritBaseURL"` + // DeleteHookOnFinish determines whether to delete the Gerrit hook for the project once the event source is stopped. + // +optional + DeleteHookOnFinish bool `json:"deleteHookOnFinish,omitempty" protobuf:"varint,6,opt,name=deleteHookOnFinish"` + // Metadata holds the user defined metadata which will passed along the event payload. + // +optional + Metadata map[string]string `json:"metadata,omitempty" protobuf:"bytes,7,rep,name=metadata"` + // List of project namespace paths like "whynowy/test". 
+ Projects []string `json:"projects,omitempty" protobuf:"bytes,8,rep,name=projects"` + // SslVerify to enable ssl verification + // +optional + SslVerify bool `json:"sslVerify,omitempty" protobuf:"varint,9,opt,name=sslVerify"` + // Filter + // +optional + Filter *EventSourceFilter `json:"filter,omitempty" protobuf:"bytes,10,opt,name=filter"` +} + +func (g GerritEventSource) NeedToCreateHooks() bool { + return g.Auth != nil && g.Webhook != nil && g.Webhook.URL != "" } type OwnedRepositories struct { - // Orgnization or user name + // Organization or user name Owner string `json:"owner,omitempty" protobuf:"bytes,1,opt,name=owner"` // Repository names Names []string `json:"names,omitempty" protobuf:"bytes,2,rep,name=names"` } +type GithubAppCreds struct { + // PrivateKey refers to a K8s secret containing the GitHub app private key + PrivateKey *corev1.SecretKeySelector `json:"privateKey" protobuf:"bytes,1,opt,name=privateKey"` + // AppID refers to the GitHub App ID for the application you created + AppID int64 `json:"appID" protobuf:"bytes,2,opt,name=appID"` + // InstallationID refers to the Installation ID of the GitHub app you created and installed + InstallationID int64 `json:"installationID" protobuf:"bytes,3,opt,name=installationID"` +} + +type PayloadEnrichmentFlags struct { + // FetchPROnPRCommentAdded determines whether to enrich the payload provided by GitHub + // on "pull request comment added" events, with the full pull request info + // +optional + FetchPROnPRCommentAdded bool `json:"fetchPROnPRCommentAdded,omitempty" protobuf:"bytes,1,opt,name=fetchPROnPRCommentAdded"` +} + // GithubEventSource refers to event-source for github related events type GithubEventSource struct { // Id is the webhook's id // Deprecated: This is not used at all, will be removed in v1.6 + // +optional ID int64 `json:"id" protobuf:"varint,1,opt,name=id"` // Webhook refers to the configuration required to run a http server Webhook *WebhookContext `json:"webhook,omitempty" protobuf:"bytes,2,opt,name=webhook"` // DeprecatedOwner refers to GitHub owner name i.e. argoproj // Deprecated: use Repositories instead. Will be unsupported in v 1.6 + // +optional DeprecatedOwner string `json:"owner" protobuf:"bytes,3,opt,name=owner"` // DeprecatedRepository refers to GitHub repo name i.e. argo-events // Deprecated: use Repositories instead. Will be unsupported in v 1.6 + // +optional DeprecatedRepository string `json:"repository" protobuf:"bytes,4,opt,name=repository"` - // Events refer to Github events to subscribe to which the event source will subscribe - + // Events refer to Github events to which the event source will subscribe Events []string `json:"events" protobuf:"bytes,5,rep,name=events"` // APIToken refers to a K8s secret containing github api token // +optional @@ -708,8 +872,20 @@ type GithubEventSource struct { // +optional Metadata map[string]string `json:"metadata,omitempty" protobuf:"bytes,14,rep,name=metadata"` // Repositories holds the information of repositories, which uses repo owner as the key, - // and list of repo names as the value + // and list of repo names as the value. Not required if Organizations is set. Repositories []OwnedRepositories `json:"repositories,omitempty" protobuf:"bytes,15,rep,name=repositories"` + // Organizations holds the names of organizations (used for organization level webhooks). Not required if Repositories is set. 
 
 type OwnedRepositories struct {
-	// Orgnization or user name
+	// Organization or user name
 	Owner string `json:"owner,omitempty" protobuf:"bytes,1,opt,name=owner"`
 	// Repository names
 	Names []string `json:"names,omitempty" protobuf:"bytes,2,rep,name=names"`
 }
 
+type GithubAppCreds struct {
+	// PrivateKey refers to a K8s secret containing the GitHub app private key
+	PrivateKey *corev1.SecretKeySelector `json:"privateKey" protobuf:"bytes,1,opt,name=privateKey"`
+	// AppID refers to the GitHub App ID for the application you created
+	AppID int64 `json:"appID" protobuf:"bytes,2,opt,name=appID"`
+	// InstallationID refers to the Installation ID of the GitHub app you created and installed
+	InstallationID int64 `json:"installationID" protobuf:"bytes,3,opt,name=installationID"`
+}
+
+type PayloadEnrichmentFlags struct {
+	// FetchPROnPRCommentAdded determines whether to enrich the payload provided by GitHub
+	// on "pull request comment added" events, with the full pull request info
+	// +optional
+	FetchPROnPRCommentAdded bool `json:"fetchPROnPRCommentAdded,omitempty" protobuf:"bytes,1,opt,name=fetchPROnPRCommentAdded"`
+}
+
 // GithubEventSource refers to event-source for github related events
 type GithubEventSource struct {
 	// Id is the webhook's id
 	// Deprecated: This is not used at all, will be removed in v1.6
+	// +optional
 	ID int64 `json:"id" protobuf:"varint,1,opt,name=id"`
 	// Webhook refers to the configuration required to run a http server
 	Webhook *WebhookContext `json:"webhook,omitempty" protobuf:"bytes,2,opt,name=webhook"`
 	// DeprecatedOwner refers to GitHub owner name i.e. argoproj
 	// Deprecated: use Repositories instead. Will be unsupported in v 1.6
+	// +optional
 	DeprecatedOwner string `json:"owner" protobuf:"bytes,3,opt,name=owner"`
 	// DeprecatedRepository refers to GitHub repo name i.e. argo-events
 	// Deprecated: use Repositories instead. Will be unsupported in v 1.6
+	// +optional
 	DeprecatedRepository string `json:"repository" protobuf:"bytes,4,opt,name=repository"`
-	// Events refer to Github events to subscribe to which the event source will subscribe
-
+	// Events refer to Github events to which the event source will subscribe
 	Events []string `json:"events" protobuf:"bytes,5,rep,name=events"`
 	// APIToken refers to a K8s secret containing github api token
 	// +optional
@@ -708,8 +872,20 @@ type GithubEventSource struct {
 	// +optional
 	Metadata map[string]string `json:"metadata,omitempty" protobuf:"bytes,14,rep,name=metadata"`
 	// Repositories holds the information of repositories, which uses repo owner as the key,
-	// and list of repo names as the value
+	// and list of repo names as the value. Not required if Organizations is set.
 	Repositories []OwnedRepositories `json:"repositories,omitempty" protobuf:"bytes,15,rep,name=repositories"`
+	// Organizations holds the names of organizations (used for organization level webhooks). Not required if Repositories is set.
+	Organizations []string `json:"organizations,omitempty" protobuf:"bytes,16,rep,name=organizations"`
+	// GitHubApp holds the GitHub app credentials
+	// +optional
+	GithubApp *GithubAppCreds `json:"githubApp,omitempty" protobuf:"bytes,17,opt,name=githubApp"`
+	// Filter
+	// +optional
+	Filter *EventSourceFilter `json:"filter,omitempty" protobuf:"bytes,18,opt,name=filter"`
+	// PayloadEnrichment holds flags that determine whether to enrich GitHub's original payload with
+	// additional information.
+	// +optional
+	PayloadEnrichment PayloadEnrichmentFlags `json:"payloadEnrichment,omitempty" protobuf:"bytes,19,rep,name=payloadEnrichment"`
 }
 
 func (g GithubEventSource) GetOwnedRepositories() []OwnedRepositories {
@@ -728,20 +904,34 @@ func (g GithubEventSource) GetOwnedRepositories() []OwnedRepositories {
 	return nil
 }
 
+func (g GithubEventSource) HasGithubAPIToken() bool {
+	return g.APIToken != nil
+}
+
+func (g GithubEventSource) HasGithubAppCreds() bool {
+	return g.GithubApp != nil && g.GithubApp.PrivateKey != nil
+}
+
+func (g GithubEventSource) HasConfiguredWebhook() bool {
+	return g.Webhook != nil && g.Webhook.URL != ""
+}
+
 func (g GithubEventSource) NeedToCreateHooks() bool {
-	return g.APIToken != nil && g.Webhook != nil && g.Webhook.URL != ""
+	return (g.HasGithubAPIToken() || g.HasGithubAppCreds()) && g.HasConfiguredWebhook()
 }
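
A sketch of the two credential paths the refactored predicate now accepts (illustrative values; APIToken is the secret selector documented above):

	withApp := GithubEventSource{
		Webhook:   &WebhookContext{URL: "https://events.example.com", Endpoint: "/github"}, // hypothetical
		GithubApp: &GithubAppCreds{AppID: 123456, InstallationID: 7890, PrivateKey: &corev1.SecretKeySelector{}},
	}
	_ = withApp.NeedToCreateHooks() // true via HasGithubAppCreds()

	withToken := GithubEventSource{
		Webhook:  &WebhookContext{URL: "https://events.example.com", Endpoint: "/github"},
		APIToken: &corev1.SecretKeySelector{}, // secret name/key elided
	}
	_ = withToken.NeedToCreateHooks() // true via HasGithubAPIToken()
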
 
 // GitlabEventSource refers to event-source related to Gitlab events
 type GitlabEventSource struct {
 	// Webhook holds configuration to run a http server
 	Webhook *WebhookContext `json:"webhook,omitempty" protobuf:"bytes,1,opt,name=webhook"`
-	// ProjectID is the id of project for which integration needs to setup
-	ProjectID string `json:"projectID" protobuf:"bytes,2,opt,name=projectID"`
+	// DeprecatedProjectID is the ID of the project for which the integration needs to be set up
+	// Deprecated: use Projects instead. Will be unsupported in v 1.7
+	// +optional
+	DeprecatedProjectID string `json:"projectID,omitempty" protobuf:"bytes,2,opt,name=projectID"`
 	// Events are gitlab event to listen to.
 	// Refer https://github.com/xanzy/go-gitlab/blob/bf34eca5d13a9f4c3f501d8a97b8ac226d55e4d9/projects.go#L794.
 	Events []string `json:"events" protobuf:"bytes,3,opt,name=events"`
-	// AccessToken is reference to k8 secret which holds the gitlab api access information
+	// AccessToken references the K8s secret that holds the gitlab api access information
 	AccessToken *corev1.SecretKeySelector `json:"accessToken,omitempty" protobuf:"bytes,4,opt,name=accessToken"`
 	// EnableSSLVerification to enable ssl verification
 	// +optional
@@ -754,6 +944,213 @@ type GitlabEventSource struct {
 	// Metadata holds the user defined metadata which will passed along the event payload.
 	// +optional
 	Metadata map[string]string `json:"metadata,omitempty" protobuf:"bytes,9,rep,name=metadata"`
+	// List of project IDs or project namespace paths like "whynowy/test". Projects and groups cannot be empty at the same time.
+	// +optional
+	Projects []string `json:"projects,omitempty" protobuf:"bytes,10,rep,name=projects"`
+	// SecretToken references the K8s secret that holds the secret token used by the webhook config
+	SecretToken *corev1.SecretKeySelector `json:"secretToken,omitempty" protobuf:"bytes,11,opt,name=secretToken"`
+	// Filter
+	// +optional
+	Filter *EventSourceFilter `json:"filter,omitempty" protobuf:"bytes,12,opt,name=filter"`
+	// List of group IDs or group names like "test".
+	// Group-level hooks are available in GitLab Premium and Ultimate.
+	// +optional
+	Groups []string `json:"groups,omitempty" protobuf:"bytes,13,rep,name=groups"`
+}
+
+func (g GitlabEventSource) GetProjects() []string {
+	if len(g.Projects) > 0 {
+		return g.Projects
+	}
+	if g.DeprecatedProjectID != "" {
+		return []string{g.DeprecatedProjectID}
+	}
+	return []string{}
+}
+
+func (g GitlabEventSource) GetGroups() []string {
+	if len(g.Groups) > 0 {
+		return g.Groups
+	}
+	return []string{}
+}
+
+func (g GitlabEventSource) NeedToCreateHooks() bool {
+	return g.AccessToken != nil && g.Webhook != nil && g.Webhook.URL != ""
+}
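
The accessors fold the deprecated single-project field into the new list form; a sketch of the fallback (illustrative values):

	legacy := GitlabEventSource{DeprecatedProjectID: "1001"}
	migrated := GitlabEventSource{Projects: []string{"1001", "whynowy/test"}}

	_ = legacy.GetProjects()   // ["1001"]: falls back to the deprecated field
	_ = migrated.GetProjects() // ["1001", "whynowy/test"]: Projects wins when non-empty
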
 
+// BitbucketEventSource describes the event source for Bitbucket
+type BitbucketEventSource struct {
+	// DeleteHookOnFinish determines whether to delete the defined Bitbucket hook once the event source is stopped.
+	// +optional
+	DeleteHookOnFinish bool `json:"deleteHookOnFinish,omitempty" protobuf:"varint,1,opt,name=deleteHookOnFinish"`
+	// Metadata holds the user defined metadata which will be passed along the event payload.
+	// +optional
+	Metadata map[string]string `json:"metadata,omitempty" protobuf:"bytes,2,rep,name=metadata"`
+	// Webhook refers to the configuration required to run an http server
+	Webhook *WebhookContext `json:"webhook" protobuf:"bytes,3,name=webhook"`
+	// Auth information required to connect to Bitbucket.
+	Auth *BitbucketAuth `json:"auth" protobuf:"bytes,4,name=auth"`
+	// Events this webhook is subscribed to.
+	Events []string `json:"events" protobuf:"bytes,5,name=events"`
+	// DeprecatedOwner is the owner of the repository.
+	// Deprecated: use Repositories instead. Will be unsupported in v1.9
+	// +optional
+	DeprecatedOwner string `json:"owner,omitempty" protobuf:"bytes,6,name=owner"`
+	// DeprecatedProjectKey is the key of the project to which the repository relates
+	// Deprecated: use Repositories instead. Will be unsupported in v1.9
+	// +optional
+	DeprecatedProjectKey string `json:"projectKey,omitempty" protobuf:"bytes,7,opt,name=projectKey"`
+	// DeprecatedRepositorySlug is a URL-friendly version of a repository name, automatically generated by Bitbucket for use in the URL
+	// Deprecated: use Repositories instead. Will be unsupported in v1.9
+	// +optional
+	DeprecatedRepositorySlug string `json:"repositorySlug,omitempty" protobuf:"bytes,8,name=repositorySlug"`
+	// Repositories holds a list of repositories for which the integration needs to be set up
+	// +optional
+	Repositories []BitbucketRepository `json:"repositories,omitempty" protobuf:"bytes,9,rep,name=repositories"`
+	// Filter
+	// +optional
+	Filter *EventSourceFilter `json:"filter,omitempty" protobuf:"bytes,10,opt,name=filter"`
+}
+
+func (b BitbucketEventSource) HasBitbucketBasicAuth() bool {
+	return b.Auth.Basic != nil && b.Auth.Basic.Username != nil && b.Auth.Basic.Password != nil
+}
+
+func (b BitbucketEventSource) HasBitbucketOAuthToken() bool {
+	return b.Auth.OAuthToken != nil
+}
+
+func (b BitbucketEventSource) HasConfiguredWebhook() bool {
+	return b.Webhook != nil && b.Webhook.URL != ""
+}
+
+func (b BitbucketEventSource) ShouldCreateWebhooks() bool {
+	return (b.HasBitbucketBasicAuth() || b.HasBitbucketOAuthToken()) && b.HasConfiguredWebhook()
+}
+
+func (b BitbucketEventSource) GetBitbucketRepositories() []BitbucketRepository {
+	if len(b.Repositories) > 0 {
+		return b.Repositories
+	}
+
+	if b.DeprecatedOwner != "" && b.DeprecatedRepositorySlug != "" {
+		return []BitbucketRepository{
+			{
+				Owner:          b.DeprecatedOwner,
+				RepositorySlug: b.DeprecatedRepositorySlug,
+			},
+		}
+	}
+
+	return nil
+}
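
GetBitbucketRepositories applies the same migration pattern, lifting a deprecated owner/slug pair into the new repository list (illustrative values):

	legacy := BitbucketEventSource{
		DeprecatedOwner:          "argoproj",
		DeprecatedRepositorySlug: "argo-events",
	}
	repos := legacy.GetBitbucketRepositories() // one-element slice built from the deprecated fields
	_ = repos[0].GetRepositoryID()             // "argoproj,argo-events"
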
+
+type BitbucketRepository struct {
+	// Owner is the owner of the repository
+	Owner string `json:"owner" protobuf:"bytes,1,name=owner"`
+	// RepositorySlug is a URL-friendly version of a repository name, automatically generated by Bitbucket for use in the URL
+	RepositorySlug string `json:"repositorySlug" protobuf:"bytes,2,rep,name=repositorySlug"`
+}
+
+// GetRepositoryID helper returns a string key identifier for the repo
+func (r BitbucketRepository) GetRepositoryID() string {
+	return r.Owner + "," + r.RepositorySlug
+}
+
+// BitbucketAuth holds the different auth strategies for connecting to Bitbucket
+type BitbucketAuth struct {
+	// Basic is BasicAuth auth strategy.
+	// +optional
+	Basic *BitbucketBasicAuth `json:"basic,omitempty" protobuf:"bytes,1,opt,name=basic"`
+	// OAuthToken refers to the K8s secret that holds the OAuth Bearer token.
+	// +optional
+	OAuthToken *corev1.SecretKeySelector `json:"oauthToken,omitempty" protobuf:"bytes,2,opt,name=oauthToken"`
+}
+
+// BitbucketBasicAuth holds the information required to authenticate via the basic auth mechanism
+type BitbucketBasicAuth struct {
+	// Username refers to the K8s secret that holds the username.
+	Username *corev1.SecretKeySelector `json:"username" protobuf:"bytes,1,name=username"`
+	// Password refers to the K8s secret that holds the password.
+	Password *corev1.SecretKeySelector `json:"password" protobuf:"bytes,2,name=password"`
+}
+
+// BitbucketServerEventSource refers to event-source related to Bitbucket Server events
+type BitbucketServerEventSource struct {
+	// Webhook holds configuration to run an http server.
+	Webhook *WebhookContext `json:"webhook,omitempty" protobuf:"bytes,1,opt,name=webhook"`
+	// DeprecatedProjectKey is the key of the project for which the integration needs to be set up.
+	// Deprecated: use Repositories instead. Will be unsupported in v1.8.
+	// +optional
+	DeprecatedProjectKey string `json:"projectKey,omitempty" protobuf:"bytes,2,opt,name=projectKey"`
+	// DeprecatedRepositorySlug is the slug of the repository for which the integration needs to be set up.
+	// Deprecated: use Repositories instead. Will be unsupported in v1.8.
+	// +optional
+	DeprecatedRepositorySlug string `json:"repositorySlug,omitempty" protobuf:"bytes,3,opt,name=repositorySlug"`
+	// Projects holds a list of projects for which the integration needs to be set up;
+	// this will add the webhook to all repositories in the project.
+	// +optional
+	Projects []string `json:"projects,omitempty" protobuf:"bytes,4,rep,name=projects"`
+	// Repositories holds a list of repositories for which the integration needs to be set up.
+	// +optional
+	Repositories []BitbucketServerRepository `json:"repositories,omitempty" protobuf:"bytes,5,rep,name=repositories"`
+	// Events are bitbucket events to listen to.
+	// Refer https://confluence.atlassian.com/bitbucketserver/event-payload-938025882.html
+	// +optional
+	Events []string `json:"events" protobuf:"bytes,6,rep,name=events"`
+	// SkipBranchRefsChangedOnOpenPR bypasses the event repo:refs_changed for branches whenever there's an associated open pull request.
+	// This helps in optimizing the event handling process by avoiding unnecessary triggers for branch reference changes that are already part of a pull request under review.
+	// +optional
+	SkipBranchRefsChangedOnOpenPR bool `json:"skipBranchRefsChangedOnOpenPR,omitempty" protobuf:"varint,7,opt,name=skipBranchRefsChangedOnOpenPR"`
+	// AccessToken is a reference to the K8s secret that holds the bitbucket api access information.
+	AccessToken *corev1.SecretKeySelector `json:"accessToken,omitempty" protobuf:"bytes,8,opt,name=accessToken"`
+	// WebhookSecret is a reference to the K8s secret that holds the bitbucket webhook secret (for HMAC validation).
+	WebhookSecret *corev1.SecretKeySelector `json:"webhookSecret,omitempty" protobuf:"bytes,9,opt,name=webhookSecret"`
+	// BitbucketServerBaseURL is the base URL for API requests to a custom endpoint.
+	BitbucketServerBaseURL string `json:"bitbucketserverBaseURL" protobuf:"bytes,10,opt,name=bitbucketserverBaseURL"`
+	// DeleteHookOnFinish determines whether to delete the Bitbucket Server hook for the project once the event source is stopped.
+	// +optional
+	DeleteHookOnFinish bool `json:"deleteHookOnFinish,omitempty" protobuf:"varint,11,opt,name=deleteHookOnFinish"`
+	// Metadata holds the user defined metadata which will be passed along the event payload.
+	// +optional
+	Metadata map[string]string `json:"metadata,omitempty" protobuf:"bytes,12,rep,name=metadata"`
+	// Filter
+	// +optional
+	Filter *EventSourceFilter `json:"filter,omitempty" protobuf:"bytes,13,opt,name=filter"`
+	// TLS configuration for the bitbucketserver client.
+	// +optional
+	TLS *apicommon.TLSConfig `json:"tls,omitempty" protobuf:"bytes,14,opt,name=tls"`
+	// CheckInterval is a duration in which to wait before checking that the webhooks exist, e.g. 1s, 30m, 2h... (defaults to 1m)
+	// +optional
+	CheckInterval string `json:"checkInterval" protobuf:"bytes,15,opt,name=checkInterval"`
+}
+
+type BitbucketServerRepository struct {
+	// ProjectKey is the key of the project for which the integration needs to be set up.
+	ProjectKey string `json:"projectKey" protobuf:"bytes,1,opt,name=projectKey"`
+	// RepositorySlug is the slug of the repository for which the integration needs to be set up.
+	RepositorySlug string `json:"repositorySlug" protobuf:"bytes,2,opt,name=repositorySlug"`
+}
+
+func (b BitbucketServerEventSource) ShouldCreateWebhooks() bool {
+	return b.AccessToken != nil && b.Webhook != nil && b.Webhook.URL != ""
+}
+
+func (b BitbucketServerEventSource) GetBitbucketServerRepositories() []BitbucketServerRepository {
+	if len(b.Repositories) > 0 {
+		return b.Repositories
+	}
+
+	if b.DeprecatedProjectKey != "" && b.DeprecatedRepositorySlug != "" {
+		return []BitbucketServerRepository{
+			{
+				ProjectKey:     b.DeprecatedProjectKey,
+				RepositorySlug: b.DeprecatedRepositorySlug,
+			},
+		}
+	}
+
+	return nil
 }
 
 // HDFSEventSource refers to event-source for HDFS related events
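
CheckInterval is a plain duration string; a consumer would presumably parse it with time.ParseDuration and fall back to the documented 1m default. A sketch under that assumption (the consuming client code is not part of this diff):

	es := BitbucketServerEventSource{CheckInterval: "30m"} // hypothetical value
	interval := 1 * time.Minute                            // documented default
	if es.CheckInterval != "" {
		if d, err := time.ParseDuration(es.CheckInterval); err == nil {
			interval = d
		}
	}
	_ = interval // 30m here
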
@@ -790,6 +1187,9 @@ type HDFSEventSource struct {
 	// Metadata holds the user defined metadata which will passed along the event payload.
 	// +optional
 	Metadata map[string]string `json:"metadata,omitempty" protobuf:"bytes,12,rep,name=metadata"`
+	// Filter
+	// +optional
+	Filter *EventSourceFilter `json:"filter,omitempty" protobuf:"bytes,13,opt,name=filter"`
 }
 
 // SlackEventSource refers to event-source for Slack related events
@@ -803,6 +1203,9 @@ type SlackEventSource struct {
 	// Metadata holds the user defined metadata which will passed along the event payload.
 	// +optional
 	Metadata map[string]string `json:"metadata,omitempty" protobuf:"bytes,4,rep,name=metadata"`
+	// Filter
+	// +optional
+	Filter *EventSourceFilter `json:"filter,omitempty" protobuf:"bytes,5,opt,name=filter"`
 }
 
 // StorageGridEventSource refers to event-source for StorageGrid related events
@@ -855,6 +1258,79 @@ type AzureEventsHubEventSource struct {
 	// Metadata holds the user defined metadata which will passed along the event payload.
 	// +optional
 	Metadata map[string]string `json:"metadata,omitempty" protobuf:"bytes,5,rep,name=metadata"`
+	// Filter
+	// +optional
+	Filter *EventSourceFilter `json:"filter,omitempty" protobuf:"bytes,6,opt,name=filter"`
+}
+
+// AzureServiceBusEventSource describes the event source for azure service bus
+// More info at https://docs.microsoft.com/en-us/azure/service-bus-messaging/
+type AzureServiceBusEventSource struct {
+	// ConnectionString is the connection string for the Azure Service Bus. If this field is not provided,
+	// it will try to access via Azure AD with DefaultAzureCredential and FullyQualifiedNamespace.
+	// +optional
+	ConnectionString *corev1.SecretKeySelector `json:"connectionString,omitempty" protobuf:"bytes,1,opt,name=connectionString"`
+	// QueueName is the name of the Azure Service Bus Queue
+	QueueName string `json:"queueName" protobuf:"bytes,2,opt,name=queueName"`
+	// TopicName is the name of the Azure Service Bus Topic
+	TopicName string `json:"topicName" protobuf:"bytes,3,opt,name=topicName"`
+	// SubscriptionName is the name of the Azure Service Bus Topic Subscription
+	SubscriptionName string `json:"subscriptionName" protobuf:"bytes,4,opt,name=subscriptionName"`
+	// TLS configuration for the service bus client
+	// +optional
+	TLS *apicommon.TLSConfig `json:"tls,omitempty" protobuf:"bytes,5,opt,name=tls"`
+	// JSONBody specifies that all event body payload coming from this
+	// source will be JSON
+	// +optional
+	JSONBody bool `json:"jsonBody,omitempty" protobuf:"varint,6,opt,name=jsonBody"`
+	// Metadata holds the user defined metadata which will be passed along the event payload.
+	// +optional
+	Metadata map[string]string `json:"metadata,omitempty" protobuf:"bytes,7,rep,name=metadata"`
+	// Filter
+	// +optional
+	Filter *EventSourceFilter `json:"filter,omitempty" protobuf:"bytes,8,opt,name=filter"`
+	// FullyQualifiedNamespace is the Service Bus namespace name (e.g. myservicebus.servicebus.windows.net). This field is necessary to
+	// access via Azure AD (managed identity) and it is ignored if ConnectionString is set.
+	// +optional
+	FullyQualifiedNamespace string `json:"fullyQualifiedNamespace,omitempty" protobuf:"bytes,9,opt,name=fullyQualifiedNamespace"`
+}
+
+// AzureQueueStorageEventSource describes the event source for azure queue storage
+// more info at https://learn.microsoft.com/en-us/azure/storage/queues/
+type AzureQueueStorageEventSource struct {
+	// StorageAccountName is the name of the storage account where the queue is. This field is necessary to
+	// access via Azure AD (managed identity) and it is ignored if ConnectionString is set.
+	// +optional
+	StorageAccountName string `json:"storageAccountName,omitempty" protobuf:"bytes,1,opt,name=storageAccountName"`
+	// ConnectionString is the connection string to access Azure Queue Storage. If this field is not provided,
+	// it will try to access via Azure AD with StorageAccountName.
+	// +optional
+	ConnectionString *corev1.SecretKeySelector `json:"connectionString,omitempty" protobuf:"bytes,2,opt,name=connectionString"`
+	// QueueName is the name of the queue
+	QueueName string `json:"queueName" protobuf:"bytes,3,opt,name=queueName"`
+	// JSONBody specifies that all event body payload coming from this
+	// source will be JSON
+	// +optional
+	JSONBody bool `json:"jsonBody,omitempty" protobuf:"varint,4,opt,name=jsonBody"`
+	// DLQ specifies if a dead-letter queue is configured for messages that can't be processed successfully.
+	// If set to true, messages with an invalid payload won't be acknowledged, so that they can be forwarded on to the dead-letter queue.
+	// The default value is false.
+	// +optional
+	DLQ bool `json:"dlq,omitempty" protobuf:"varint,5,opt,name=dlq"`
+	// Metadata holds the user defined metadata which will be passed along the event payload.
+	// +optional
+	Metadata map[string]string `json:"metadata,omitempty" protobuf:"bytes,6,rep,name=metadata"`
+	// Filter
+	// +optional
+	Filter *EventSourceFilter `json:"filter,omitempty" protobuf:"bytes,7,opt,name=filter"`
+	// DecodeMessage specifies if all the messages should be base64 decoded.
+	// If set to true, the decoding is done before the evaluation of JSONBody
+	// +optional
+	DecodeMessage bool `json:"decodeMessage,omitempty" protobuf:"bytes,8,opt,name=decodeMessage"`
+	// WaitTimeInSeconds is the duration (in seconds) for which the event source waits between empty results from the queue.
+	// The default value is 3 seconds.
+	// +optional
+	WaitTimeInSeconds *int32 `json:"waitTimeInSeconds,omitempty" protobuf:"varint,9,opt,name=waitTimeInSeconds"`
 }
 
 // StripeEventSource describes the event source for stripe webhook notifications
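
How the queue-storage knobs combine, per the field docs above (illustrative values): decoding runs before the JSONBody check, and DLQ leaves unprocessable messages unacknowledged.

	wait := int32(5)
	es := AzureQueueStorageEventSource{
		StorageAccountName: "myaccount", // used with Azure AD when ConnectionString is unset
		QueueName:          "events",
		DecodeMessage:      true,  // base64-decode first...
		JSONBody:           true,  // ...then treat the decoded body as JSON
		DLQ:                true,  // don't ack bad messages so they reach the dead-letter queue
		WaitTimeInSeconds:  &wait, // pause between empty reads (defaults to 3)
	}
	_ = es
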
@@ -905,6 +1381,9 @@ type EmitterEventSource struct {
 	// Metadata holds the user defined metadata which will passed along the event payload.
 	// +optional
 	Metadata map[string]string `json:"metadata,omitempty" protobuf:"bytes,9,rep,name=metadata"`
+	// Filter
+	// +optional
+	Filter *EventSourceFilter `json:"filter,omitempty" protobuf:"bytes,10,opt,name=filter"`
 }
 
 // RedisEventSource describes an event source for the Redis PubSub.
@@ -930,6 +1409,52 @@ type RedisEventSource struct {
 	// Metadata holds the user defined metadata which will passed along the event payload.
 	// +optional
 	Metadata map[string]string `json:"metadata,omitempty" protobuf:"bytes,7,rep,name=metadata"`
+	// Filter
+	// +optional
+	Filter *EventSourceFilter `json:"filter,omitempty" protobuf:"bytes,8,opt,name=filter"`
+	// JSONBody specifies that all event body payload coming from this
+	// source will be JSON
+	// +optional
+	JSONBody bool `json:"jsonBody,omitempty" protobuf:"varint,9,opt,name=jsonBody"`
+	// Username required for ACL-style authentication if any.
+	// +optional
+	Username string `json:"username,omitempty" protobuf:"bytes,10,opt,name=username"`
+}
+
+// RedisStreamEventSource describes an event source for
+// Redis streams (https://redis.io/topics/streams-intro)
+type RedisStreamEventSource struct {
+	// HostAddress refers to the address of the Redis host/server (master instance)
+	HostAddress string `json:"hostAddress" protobuf:"bytes,1,opt,name=hostAddress"`
+	// Password required for authentication if any.
+	// +optional
+	Password *corev1.SecretKeySelector `json:"password,omitempty" protobuf:"bytes,2,opt,name=password"`
+	// DB to use. If not specified, default DB 0 will be used.
+	// +optional
+	DB int32 `json:"db,omitempty" protobuf:"varint,3,opt,name=db"`
+	// Streams to look for entries. XREADGROUP is used on all streams using a single consumer group.
+	Streams []string `json:"streams" protobuf:"bytes,4,rep,name=streams"`
+	// MaxMsgCountPerRead holds the maximum number of messages per stream that will be read in each XREADGROUP of all streams.
+	// Example: if there are 2 streams and MaxMsgCountPerRead=10, then each XREADGROUP may read up to a total of 20 messages.
+	// Same as the COUNT option in XREADGROUP (https://redis.io/topics/streams-intro). Defaults to 10
+	// +optional
+	MaxMsgCountPerRead int32 `json:"maxMsgCountPerRead,omitempty" protobuf:"varint,5,opt,name=maxMsgCountPerRead"`
+	// ConsumerGroup refers to the Redis stream consumer group that will be
+	// created on all redis streams. Messages are read through this group. Defaults to 'argo-events-cg'
+	// +optional
+	ConsumerGroup string `json:"consumerGroup,omitempty" protobuf:"bytes,6,opt,name=consumerGroup"`
+	// TLS configuration for the redis client.
+	// +optional
+	TLS *apicommon.TLSConfig `json:"tls,omitempty" protobuf:"bytes,7,opt,name=tls"`
+	// Metadata holds the user defined metadata which will be passed along the event payload.
+	// +optional
+	Metadata map[string]string `json:"metadata,omitempty" protobuf:"bytes,8,rep,name=metadata"`
+	// Filter
+	// +optional
+	Filter *EventSourceFilter `json:"filter,omitempty" protobuf:"bytes,9,opt,name=filter"`
+	// Username required for ACL-style authentication if any.
+	// +optional
+	Username string `json:"username,omitempty" protobuf:"bytes,10,opt,name=username"`
 }
 
 // NSQEventSource describes the event source for NSQ PubSub
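
The COUNT arithmetic from the MaxMsgCountPerRead comment, made concrete (illustrative values):

	es := RedisStreamEventSource{
		HostAddress:        "redis:6379",
		Streams:            []string{"orders", "payments"}, // 2 streams, one consumer group
		MaxMsgCountPerRead: 10,                             // COUNT per stream
		ConsumerGroup:      "argo-events-cg",               // the documented default
	}
	// A single XREADGROUP across both streams may therefore return up to
	// len(es.Streams) * int(es.MaxMsgCountPerRead) = 20 entries.
	_ = es
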
@@ -954,6 +1479,9 @@ type NSQEventSource struct {
 	// Metadata holds the user defined metadata which will passed along the event payload.
 	// +optional
 	Metadata map[string]string `json:"metadata,omitempty" protobuf:"bytes,7,rep,name=metadata"`
+	// Filter
+	// +optional
+	Filter *EventSourceFilter `json:"filter,omitempty" protobuf:"bytes,8,opt,name=filter"`
 }
 
 // PulsarEventSource describes the event source for Apache Pulsar
@@ -991,6 +1519,22 @@ type PulsarEventSource struct {
 	// Metadata holds the user defined metadata which will passed along the event payload.
 	// +optional
 	Metadata map[string]string `json:"metadata,omitempty" protobuf:"bytes,10,rep,name=metadata"`
+	// Authentication token for the pulsar client.
+	// Either token or athenz can be set to use auth.
+	// +optional
+	AuthTokenSecret *corev1.SecretKeySelector `json:"authTokenSecret,omitempty" protobuf:"bytes,11,opt,name=authTokenSecret"`
+	// Filter
+	// +optional
+	Filter *EventSourceFilter `json:"filter,omitempty" protobuf:"bytes,12,opt,name=filter"`
+	// Authentication athenz parameters for the pulsar client.
+	// Refer https://github.com/apache/pulsar-client-go/blob/master/pulsar/auth/athenz.go
+	// Either token or athenz can be set to use auth.
+	// +optional
+	AuthAthenzParams map[string]string `json:"authAthenzParams,omitempty" protobuf:"bytes,13,rep,name=authAthenzParams"`
+	// Authentication athenz privateKey secret for the pulsar client.
+	// AuthAthenzSecret must be set if AuthAthenzParams is used.
+	// +optional
+	AuthAthenzSecret *corev1.SecretKeySelector `json:"authAthenzSecret,omitempty" protobuf:"bytes,14,opt,name=authAthenzSecret"`
 }
 
 // GenericEventSource refers to a generic event source. It can be used to implement a custom event source.
@@ -1011,6 +1555,9 @@ type GenericEventSource struct {
 	// AuthSecret holds a secret selector that contains a bearer token for authentication
 	// +optional
 	AuthSecret *corev1.SecretKeySelector `json:"authSecret,omitempty" protobuf:"bytes,6,opt,name=authSecret"`
+	// Filter
+	// +optional
+	Filter *EventSourceFilter `json:"filter,omitempty" protobuf:"bytes,7,opt,name=filter"`
 }
 
 const (
diff --git a/pkg/apis/eventsource/v1alpha1/types_test.go b/pkg/apis/eventsource/v1alpha1/types_test.go
index 3e4f7373a9..e1af8ef86a 100644
--- a/pkg/apis/eventsource/v1alpha1/types_test.go
+++ b/pkg/apis/eventsource/v1alpha1/types_test.go
@@ -13,13 +13,6 @@ func TestGetReplicas(t *testing.T) {
 	assert.Equal(t, ep.GetReplicas(), int32(1))
 	ep.Replicas = convertInt(t, 2)
 	assert.Equal(t, ep.GetReplicas(), int32(2))
-	ep.Replicas = nil
-	ep.DeprecatedReplica = convertInt(t, 0)
-	assert.Equal(t, ep.GetReplicas(), int32(1))
-	ep.DeprecatedReplica = convertInt(t, 1)
-	assert.Equal(t, ep.GetReplicas(), int32(1))
-	ep.DeprecatedReplica = convertInt(t, 2)
-	assert.Equal(t, ep.GetReplicas(), int32(2))
 }
 
 func convertInt(t *testing.T, num int) *int32 {
diff --git a/pkg/apis/eventsource/v1alpha1/validate.go b/pkg/apis/eventsource/v1alpha1/validate.go
index c912ba24b5..511f5e1cc8 100644
--- a/pkg/apis/eventsource/v1alpha1/validate.go
+++ b/pkg/apis/eventsource/v1alpha1/validate.go
@@ -16,14 +16,12 @@ limitations under the License.
 
 package v1alpha1
 
-import (
-	"github.com/pkg/errors"
-)
+import fmt "fmt"
 
 // ValidateEventSource validates a generic event source
 func ValidateEventSource(eventSource *EventSource) error {
 	if eventSource == nil {
-		return errors.New("event source can't be nil")
+		return fmt.Errorf("event source can't be nil")
 	}
 	return nil
 }
diff --git a/pkg/apis/eventsource/v1alpha1/webhook_context.go b/pkg/apis/eventsource/v1alpha1/webhook_context.go
index 3ae2df0622..d261ded9ef 100644
--- a/pkg/apis/eventsource/v1alpha1/webhook_context.go
+++ b/pkg/apis/eventsource/v1alpha1/webhook_context.go
@@ -4,6 +4,8 @@ import (
 	corev1 "k8s.io/api/core/v1"
 )
 
+const DefaultMaxWebhookPayloadSize int64 = 1048576 // 1MB
+
 // WebhookContext holds a general purpose REST API context
 type WebhookContext struct {
 	// REST API endpoint
@@ -25,8 +27,18 @@ type WebhookContext struct {
 	// AuthSecret holds a secret selector that contains a bearer token for authentication
 	// +optional
 	AuthSecret *corev1.SecretKeySelector `json:"authSecret,omitempty" protobuf:"bytes,8,opt,name=authSecret"`
-	// DeprecatedServerCertPath refers the file that contains the cert.
-	DeprecatedServerCertPath string `json:"serverCertPath,omitempty" protobuf:"bytes,9,opt,name=serverCertPath"`
-	// DeprecatedServerKeyPath refers the file that contains private key
-	DeprecatedServerKeyPath string `json:"serverKeyPath,omitempty" protobuf:"bytes,10,opt,name=serverKeyPath"`
+	// MaxPayloadSize is the maximum webhook payload size that the server will accept.
+	// Requests exceeding that limit will be rejected with a "request too large" response.
+	// Default value: 1048576 (1MB).
+	// +optional
+	MaxPayloadSize *int64 `json:"maxPayloadSize,omitempty" protobuf:"bytes,9,opt,name=maxPayloadSize"`
+}
+
+func (wc *WebhookContext) GetMaxPayloadSize() int64 {
+	maxPayloadSize := DefaultMaxWebhookPayloadSize
+	if wc != nil && wc.MaxPayloadSize != nil {
+		maxPayloadSize = *wc.MaxPayloadSize
+	}
+
+	return maxPayloadSize
+}
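
GetMaxPayloadSize is deliberately nil-safe, so callers need not guard either the receiver or the pointer; a usage sketch:

	var wc *WebhookContext
	_ = wc.GetMaxPayloadSize() // 1048576, i.e. DefaultMaxWebhookPayloadSize, even on a nil receiver

	limit := int64(5 * 1024 * 1024)
	wc = &WebhookContext{MaxPayloadSize: &limit}
	_ = wc.GetMaxPayloadSize() // 5242880
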
diff --git a/pkg/apis/eventsource/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/eventsource/v1alpha1/zz_generated.deepcopy.go
index 176d998f64..a0f1650a95 100644
--- a/pkg/apis/eventsource/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/apis/eventsource/v1alpha1/zz_generated.deepcopy.go
@@ -1,7 +1,8 @@
+//go:build !ignore_autogenerated
 // +build !ignore_autogenerated
 
 /*
-Copyright 2020 BlackRock, Inc.
+Copyright 2021 BlackRock, Inc.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -21,8 +22,6 @@ limitations under the License.
 package v1alpha1
 
 import (
-	json "encoding/json"
-
 	common "github.com/argoproj/argo-events/pkg/apis/common"
 	v1 "k8s.io/api/core/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
@@ -89,6 +88,16 @@ func (in *AMQPEventSource) DeepCopyInto(out *AMQPEventSource) {
 		*out = new(common.BasicAuth)
 		(*in).DeepCopyInto(*out)
 	}
+	if in.URLSecret != nil {
+		in, out := &in.URLSecret, &out.URLSecret
+		*out = new(v1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Filter != nil {
+		in, out := &in.Filter, &out.Filter
+		*out = new(EventSourceFilter)
+		**out = **in
+	}
 	return
 }
 
@@ -170,6 +179,11 @@ func (in *AzureEventsHubEventSource) DeepCopyInto(out *AzureEventsHubEventSource
 			(*out)[key] = val
 		}
 	}
+	if in.Filter != nil {
+		in, out := &in.Filter, &out.Filter
+		*out = new(EventSourceFilter)
+		**out = **in
+	}
 	return
 }
 
@@ -183,6 +197,277 @@ func (in *AzureEventsHubEventSource) DeepCopy() *AzureEventsHubEventSource {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzureQueueStorageEventSource) DeepCopyInto(out *AzureQueueStorageEventSource) {
+	*out = *in
+	if in.ConnectionString != nil {
+		in, out := &in.ConnectionString, &out.ConnectionString
+		*out = new(v1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Metadata != nil {
+		in, out := &in.Metadata, &out.Metadata
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Filter != nil {
+		in, out := &in.Filter, &out.Filter
+		*out = new(EventSourceFilter)
+		**out = **in
+	}
+	if in.WaitTimeInSeconds != nil {
+		in, out := &in.WaitTimeInSeconds, &out.WaitTimeInSeconds
+		*out = new(int32)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureQueueStorageEventSource.
+func (in *AzureQueueStorageEventSource) DeepCopy() *AzureQueueStorageEventSource {
+	if in == nil {
+		return nil
+	}
+	out := new(AzureQueueStorageEventSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzureServiceBusEventSource) DeepCopyInto(out *AzureServiceBusEventSource) {
+	*out = *in
+	if in.ConnectionString != nil {
+		in, out := &in.ConnectionString, &out.ConnectionString
+		*out = new(v1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.TLS != nil {
+		in, out := &in.TLS, &out.TLS
+		*out = new(common.TLSConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Metadata != nil {
+		in, out := &in.Metadata, &out.Metadata
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Filter != nil {
+		in, out := &in.Filter, &out.Filter
+		*out = new(EventSourceFilter)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureServiceBusEventSource.
+func (in *AzureServiceBusEventSource) DeepCopy() *AzureServiceBusEventSource {
+	if in == nil {
+		return nil
+	}
+	out := new(AzureServiceBusEventSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BitbucketAuth) DeepCopyInto(out *BitbucketAuth) {
+	*out = *in
+	if in.Basic != nil {
+		in, out := &in.Basic, &out.Basic
+		*out = new(BitbucketBasicAuth)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.OAuthToken != nil {
+		in, out := &in.OAuthToken, &out.OAuthToken
+		*out = new(v1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BitbucketAuth.
+func (in *BitbucketAuth) DeepCopy() *BitbucketAuth {
+	if in == nil {
+		return nil
+	}
+	out := new(BitbucketAuth)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BitbucketBasicAuth) DeepCopyInto(out *BitbucketBasicAuth) {
+	*out = *in
+	if in.Username != nil {
+		in, out := &in.Username, &out.Username
+		*out = new(v1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Password != nil {
+		in, out := &in.Password, &out.Password
+		*out = new(v1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BitbucketBasicAuth.
+func (in *BitbucketBasicAuth) DeepCopy() *BitbucketBasicAuth {
+	if in == nil {
+		return nil
+	}
+	out := new(BitbucketBasicAuth)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BitbucketEventSource) DeepCopyInto(out *BitbucketEventSource) {
+	*out = *in
+	if in.Metadata != nil {
+		in, out := &in.Metadata, &out.Metadata
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Webhook != nil {
+		in, out := &in.Webhook, &out.Webhook
+		*out = new(WebhookContext)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Auth != nil {
+		in, out := &in.Auth, &out.Auth
+		*out = new(BitbucketAuth)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Events != nil {
+		in, out := &in.Events, &out.Events
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Repositories != nil {
+		in, out := &in.Repositories, &out.Repositories
+		*out = make([]BitbucketRepository, len(*in))
+		copy(*out, *in)
+	}
+	if in.Filter != nil {
+		in, out := &in.Filter, &out.Filter
+		*out = new(EventSourceFilter)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BitbucketEventSource.
+func (in *BitbucketEventSource) DeepCopy() *BitbucketEventSource {
+	if in == nil {
+		return nil
+	}
+	out := new(BitbucketEventSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BitbucketRepository) DeepCopyInto(out *BitbucketRepository) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BitbucketRepository.
+func (in *BitbucketRepository) DeepCopy() *BitbucketRepository {
+	if in == nil {
+		return nil
+	}
+	out := new(BitbucketRepository)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BitbucketServerEventSource) DeepCopyInto(out *BitbucketServerEventSource) {
+	*out = *in
+	if in.Webhook != nil {
+		in, out := &in.Webhook, &out.Webhook
+		*out = new(WebhookContext)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Projects != nil {
+		in, out := &in.Projects, &out.Projects
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Repositories != nil {
+		in, out := &in.Repositories, &out.Repositories
+		*out = make([]BitbucketServerRepository, len(*in))
+		copy(*out, *in)
+	}
+	if in.Events != nil {
+		in, out := &in.Events, &out.Events
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.AccessToken != nil {
+		in, out := &in.AccessToken, &out.AccessToken
+		*out = new(v1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.WebhookSecret != nil {
+		in, out := &in.WebhookSecret, &out.WebhookSecret
+		*out = new(v1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Metadata != nil {
+		in, out := &in.Metadata, &out.Metadata
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Filter != nil {
+		in, out := &in.Filter, &out.Filter
+		*out = new(EventSourceFilter)
+		**out = **in
+	}
+	if in.TLS != nil {
+		in, out := &in.TLS, &out.TLS
+		*out = new(common.TLSConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BitbucketServerEventSource.
+func (in *BitbucketServerEventSource) DeepCopy() *BitbucketServerEventSource {
+	if in == nil {
+		return nil
+	}
+	out := new(BitbucketServerEventSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BitbucketServerRepository) DeepCopyInto(out *BitbucketServerRepository) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BitbucketServerRepository.
+func (in *BitbucketServerRepository) DeepCopy() *BitbucketServerRepository {
+	if in == nil {
+		return nil
+	}
+	out := new(BitbucketServerRepository)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *CalendarEventSource) DeepCopyInto(out *CalendarEventSource) {
 	*out = *in
@@ -191,11 +476,6 @@ func (in *CalendarEventSource) DeepCopyInto(out *CalendarEventSource) {
 		*out = make([]string, len(*in))
 		copy(*out, *in)
 	}
-	if in.UserPayload != nil {
-		in, out := &in.UserPayload, &out.UserPayload
-		*out = make(json.RawMessage, len(*in))
-		copy(*out, *in)
-	}
 	if in.Metadata != nil {
 		in, out := &in.Metadata, &out.Metadata
 		*out = make(map[string]string, len(*in))
@@ -208,6 +488,11 @@ func (in *CalendarEventSource) DeepCopyInto(out *CalendarEventSource) {
 		*out = new(EventPersistence)
 		(*in).DeepCopyInto(*out)
 	}
+	if in.Filter != nil {
+		in, out := &in.Filter, &out.Filter
+		*out = new(EventSourceFilter)
+		**out = **in
+	}
 	return
 }
 
@@ -283,6 +568,11 @@ func (in *EmitterEventSource) DeepCopyInto(out *EmitterEventSource) {
 			(*out)[key] = val
 		}
 	}
+	if in.Filter != nil {
+		in, out := &in.Filter, &out.Filter
+		*out = new(EventSourceFilter)
+		**out = **in
+	}
 	return
 }
 
@@ -350,6 +640,22 @@ func (in *EventSource) DeepCopyObject() runtime.Object {
 	return nil
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventSourceFilter) DeepCopyInto(out *EventSourceFilter) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSourceFilter.
+func (in *EventSourceFilter) DeepCopy() *EventSourceFilter {
+	if in == nil {
+		return nil
+	}
+	out := new(EventSourceFilter)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *EventSourceList) DeepCopyInto(out *EventSourceList) {
 	*out = *in
@@ -396,11 +702,6 @@ func (in *EventSourceSpec) DeepCopyInto(out *EventSourceSpec) {
 		*out = new(Service)
 		(*in).DeepCopyInto(*out)
 	}
-	if in.DeprecatedReplica != nil {
-		in, out := &in.DeprecatedReplica, &out.DeprecatedReplica
-		*out = new(int32)
-		**out = **in
-	}
 	if in.Minio != nil {
 		in, out := &in.Minio, &out.Minio
 		*out = make(map[string]common.S3Artifact, len(*in))
@@ -431,7 +732,7 @@ func (in *EventSourceSpec) DeepCopyInto(out *EventSourceSpec) {
 	}
 	if in.Webhook != nil {
 		in, out := &in.Webhook, &out.Webhook
-		*out = make(map[string]WebhookContext, len(*in))
+		*out = make(map[string]WebhookEventSource, len(*in))
 		for key, val := range *in {
 			(*out)[key] = *val.DeepCopy()
 		}
@@ -574,6 +875,55 @@ func (in *EventSourceSpec) DeepCopyInto(out *EventSourceSpec) {
 		*out = new(int32)
 		**out = **in
 	}
+	if in.BitbucketServer != nil {
+		in, out := &in.BitbucketServer, &out.BitbucketServer
+		*out = make(map[string]BitbucketServerEventSource, len(*in))
+		for key, val := range *in {
+			(*out)[key] = *val.DeepCopy()
+		}
+	}
+	if in.Bitbucket != nil {
+		in, out := &in.Bitbucket, &out.Bitbucket
+		*out = make(map[string]BitbucketEventSource, len(*in))
+		for key, val := range *in {
+			(*out)[key] = *val.DeepCopy()
+		}
+	}
+	if in.RedisStream != nil {
+		in, out := &in.RedisStream, &out.RedisStream
+		*out = make(map[string]RedisStreamEventSource, len(*in))
+		for key, val := range *in {
+			(*out)[key] = *val.DeepCopy()
+		}
+	}
+	if in.AzureServiceBus != nil {
+		in, out := &in.AzureServiceBus, &out.AzureServiceBus
+		*out = make(map[string]AzureServiceBusEventSource, len(*in))
+		for key, val := range *in {
+			(*out)[key] = *val.DeepCopy()
+		}
+	}
+	if in.AzureQueueStorage != nil {
+		in, out := &in.AzureQueueStorage, &out.AzureQueueStorage
+		*out = make(map[string]AzureQueueStorageEventSource, len(*in))
+		for key, val := range *in {
+			(*out)[key] = *val.DeepCopy()
+		}
+	}
+	if in.SFTP != nil {
+		in, out := &in.SFTP, &out.SFTP
+		*out = make(map[string]SFTPEventSource, len(*in))
+		for key, val := range *in {
+			(*out)[key] = *val.DeepCopy()
+		}
+	}
+	if in.Gerrit != nil {
+		in, out := &in.Gerrit, &out.Gerrit
+		*out = make(map[string]GerritEventSource, len(*in))
+		for key, val := range *in {
+			(*out)[key] = *val.DeepCopy()
+		}
+	}
 	return
 }
 
@@ -599,15 +949,43 @@ func (in *EventSourceStatus) DeepCopy() *EventSourceStatus {
 	if in == nil {
 		return nil
 	}
-	out := new(EventSourceStatus)
+	out := new(EventSourceStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *FileEventSource) DeepCopyInto(out *FileEventSource) {
+	*out = *in
+	out.WatchPathConfig = in.WatchPathConfig
+	if in.Metadata != nil {
+		in, out := &in.Metadata, &out.Metadata
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Filter != nil {
+		in, out := &in.Filter, &out.Filter
+		*out = new(EventSourceFilter)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileEventSource.
+func (in *FileEventSource) DeepCopy() *FileEventSource {
+	if in == nil {
+		return nil
+	}
+	out := new(FileEventSource)
 	in.DeepCopyInto(out)
 	return out
 }
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *FileEventSource) DeepCopyInto(out *FileEventSource) {
+func (in *GenericEventSource) DeepCopyInto(out *GenericEventSource) {
 	*out = *in
-	out.WatchPathConfig = in.WatchPathConfig
 	if in.Metadata != nil {
 		in, out := &in.Metadata, &out.Metadata
 		*out = make(map[string]string, len(*in))
@@ -615,22 +993,47 @@ func (in *FileEventSource) DeepCopyInto(out *FileEventSource) {
 			(*out)[key] = val
 		}
 	}
+	if in.AuthSecret != nil {
+		in, out := &in.AuthSecret, &out.AuthSecret
+		*out = new(v1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Filter != nil {
+		in, out := &in.Filter, &out.Filter
+		*out = new(EventSourceFilter)
+		**out = **in
+	}
 	return
 }
 
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileEventSource.
-func (in *FileEventSource) DeepCopy() *FileEventSource {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericEventSource.
+func (in *GenericEventSource) DeepCopy() *GenericEventSource {
 	if in == nil {
 		return nil
 	}
-	out := new(FileEventSource)
+	out := new(GenericEventSource)
 	in.DeepCopyInto(out)
 	return out
 }
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *GenericEventSource) DeepCopyInto(out *GenericEventSource) {
+func (in *GerritEventSource) DeepCopyInto(out *GerritEventSource) {
 	*out = *in
+	if in.Webhook != nil {
+		in, out := &in.Webhook, &out.Webhook
+		*out = new(WebhookContext)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Events != nil {
+		in, out := &in.Events, &out.Events
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Auth != nil {
+		in, out := &in.Auth, &out.Auth
+		*out = new(common.BasicAuth)
+		(*in).DeepCopyInto(*out)
+	}
 	if in.Metadata != nil {
 		in, out := &in.Metadata, &out.Metadata
 		*out = make(map[string]string, len(*in))
@@ -638,20 +1041,46 @@ func (in *GenericEventSource) DeepCopyInto(out *GenericEventSource) {
 			(*out)[key] = val
 		}
 	}
-	if in.AuthSecret != nil {
-		in, out := &in.AuthSecret, &out.AuthSecret
+	if in.Projects != nil {
+		in, out := &in.Projects, &out.Projects
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Filter != nil {
+		in, out := &in.Filter, &out.Filter
+		*out = new(EventSourceFilter)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GerritEventSource.
+func (in *GerritEventSource) DeepCopy() *GerritEventSource {
+	if in == nil {
+		return nil
+	}
+	out := new(GerritEventSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *GithubAppCreds) DeepCopyInto(out *GithubAppCreds) {
+	*out = *in
+	if in.PrivateKey != nil {
+		in, out := &in.PrivateKey, &out.PrivateKey
 		*out = new(v1.SecretKeySelector)
 		(*in).DeepCopyInto(*out)
 	}
 	return
 }
 
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericEventSource.
-func (in *GenericEventSource) DeepCopy() *GenericEventSource {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GithubAppCreds.
+func (in *GithubAppCreds) DeepCopy() *GithubAppCreds {
 	if in == nil {
 		return nil
 	}
-	out := new(GenericEventSource)
+	out := new(GithubAppCreds)
 	in.DeepCopyInto(out)
 	return out
 }
 
@@ -693,6 +1122,22 @@ func (in *GithubEventSource) DeepCopyInto(out *GithubEventSource) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
+	if in.Organizations != nil {
+		in, out := &in.Organizations, &out.Organizations
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.GithubApp != nil {
+		in, out := &in.GithubApp, &out.GithubApp
+		*out = new(GithubAppCreds)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Filter != nil {
+		in, out := &in.Filter, &out.Filter
+		*out = new(EventSourceFilter)
+		**out = **in
+	}
+	out.PayloadEnrichment = in.PayloadEnrichment
 	return
 }
 
@@ -731,6 +1176,26 @@ func (in *GitlabEventSource) DeepCopyInto(out *GitlabEventSource) {
 			(*out)[key] = val
 		}
 	}
+	if in.Projects != nil {
+		in, out := &in.Projects, &out.Projects
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.SecretToken != nil {
+		in, out := &in.SecretToken, &out.SecretToken
+		*out = new(v1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Filter != nil {
+		in, out := &in.Filter, &out.Filter
+		*out = new(EventSourceFilter)
+		**out = **in
+	}
+	if in.Groups != nil {
+		in, out := &in.Groups, &out.Groups
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
 	return
 }
 
@@ -775,6 +1240,11 @@ func (in *HDFSEventSource) DeepCopyInto(out *HDFSEventSource) {
 			(*out)[key] = val
 		}
 	}
+	if in.Filter != nil {
+		in, out := &in.Filter, &out.Filter
+		*out = new(EventSourceFilter)
+		**out = **in
+	}
 	return
 }
 
@@ -834,6 +1304,11 @@ func (in *KafkaEventSource) DeepCopyInto(out *KafkaEventSource) {
 		*out = new(common.SASLConfig)
 		(*in).DeepCopyInto(*out)
 	}
+	if in.Filter != nil {
+		in, out := &in.Filter, &out.Filter
+		*out = new(EventSourceFilter)
+		**out = **in
+	}
 	return
 }
 
@@ -867,6 +1342,16 @@ func (in *MQTTEventSource) DeepCopyInto(out *MQTTEventSource) {
 			(*out)[key] = val
 		}
 	}
+	if in.Filter != nil {
+		in, out := &in.Filter, &out.Filter
+		*out = new(EventSourceFilter)
+		**out = **in
+	}
+	if in.Auth != nil {
+		in, out := &in.Auth, &out.Auth
+		*out = new(common.BasicAuth)
+		(*in).DeepCopyInto(*out)
+	}
 	return
 }
 
@@ -941,6 +1426,16 @@ func (in *NATSEventsSource) DeepCopyInto(out *NATSEventsSource) {
 		*out = new(NATSAuth)
 		(*in).DeepCopyInto(*out)
 	}
+	if in.Filter != nil {
+		in, out := &in.Filter, &out.Filter
+		*out = new(EventSourceFilter)
+		**out = **in
+	}
+	if in.Queue != nil {
+		in, out := &in.Queue, &out.Queue
+		*out = new(string)
+		**out = **in
+	}
 	return
 }
 
@@ -974,6 +1469,11 @@ func (in *NSQEventSource) DeepCopyInto(out *NSQEventSource) {
 			(*out)[key] = val
 		}
 	}
+	if in.Filter != nil {
+		in, out := &in.Filter, &out.Filter
+		*out = new(EventSourceFilter)
+		**out = **in
+	}
 	return
 }
 
@@ -1008,6 +1508,22 @@ func (in *OwnedRepositories) DeepCopy() *OwnedRepositories {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PayloadEnrichmentFlags) DeepCopyInto(out *PayloadEnrichmentFlags) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PayloadEnrichmentFlags.
+func (in *PayloadEnrichmentFlags) DeepCopy() *PayloadEnrichmentFlags {
+	if in == nil {
+		return nil
+	}
+	out := new(PayloadEnrichmentFlags)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *PubSubEventSource) DeepCopyInto(out *PubSubEventSource) {
 	*out = *in
@@ -1023,6 +1539,11 @@ func (in *PubSubEventSource) DeepCopyInto(out *PubSubEventSource) {
 			(*out)[key] = val
 		}
 	}
+	if in.Filter != nil {
+		in, out := &in.Filter, &out.Filter
+		*out = new(EventSourceFilter)
+		**out = **in
+	}
 	return
 }
 
@@ -1066,6 +1587,28 @@ func (in *PulsarEventSource) DeepCopyInto(out *PulsarEventSource) {
 			(*out)[key] = val
 		}
 	}
+	if in.AuthTokenSecret != nil {
+		in, out := &in.AuthTokenSecret, &out.AuthTokenSecret
+		*out = new(v1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Filter != nil {
+		in, out := &in.Filter, &out.Filter
+		*out = new(EventSourceFilter)
+		**out = **in
+	}
+	if in.AuthAthenzParams != nil {
+		in, out := &in.AuthAthenzParams, &out.AuthAthenzParams
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.AuthAthenzSecret != nil {
+		in, out := &in.AuthAthenzSecret, &out.AuthAthenzSecret
+		*out = new(v1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
 	return
 }
 
@@ -1104,6 +1647,11 @@ func (in *RedisEventSource) DeepCopyInto(out *RedisEventSource) {
 			(*out)[key] = val
 		}
 	}
+	if in.Filter != nil {
+		in, out := &in.Filter, &out.Filter
+		*out = new(EventSourceFilter)
+		**out = **in
+	}
 	return
 }
 
@@ -1117,6 +1665,49 @@ func (in *RedisEventSource) DeepCopy() *RedisEventSource {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *RedisStreamEventSource) DeepCopyInto(out *RedisStreamEventSource) {
+	*out = *in
+	if in.Password != nil {
+		in, out := &in.Password, &out.Password
+		*out = new(v1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Streams != nil {
+		in, out := &in.Streams, &out.Streams
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.TLS != nil {
+		in, out := &in.TLS, &out.TLS
+		*out = new(common.TLSConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Metadata != nil {
+		in, out := &in.Metadata, &out.Metadata
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Filter != nil {
+		in, out := &in.Filter, &out.Filter
+		*out = new(EventSourceFilter)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedisStreamEventSource.
+func (in *RedisStreamEventSource) DeepCopy() *RedisStreamEventSource {
+	if in == nil {
+		return nil
+	}
+	out := new(RedisStreamEventSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ResourceEventSource) DeepCopyInto(out *ResourceEventSource) {
 	*out = *in
@@ -1178,6 +1769,55 @@ func (in *ResourceFilter) DeepCopy() *ResourceFilter {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SFTPEventSource) DeepCopyInto(out *SFTPEventSource) {
+	*out = *in
+	out.WatchPathConfig = in.WatchPathConfig
+	if in.Username != nil {
+		in, out := &in.Username, &out.Username
+		*out = new(v1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Password != nil {
+		in, out := &in.Password, &out.Password
+		*out = new(v1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.SSHKeySecret != nil {
+		in, out := &in.SSHKeySecret, &out.SSHKeySecret
+		*out = new(v1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Address != nil {
+		in, out := &in.Address, &out.Address
+		*out = new(v1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Metadata != nil {
+		in, out := &in.Metadata, &out.Metadata
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Filter != nil {
+		in, out := &in.Filter, &out.Filter
+		*out = new(EventSourceFilter)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SFTPEventSource.
+func (in *SFTPEventSource) DeepCopy() *SFTPEventSource {
+	if in == nil {
+		return nil
+	}
+	out := new(SFTPEventSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *SNSEventSource) DeepCopyInto(out *SNSEventSource) {
 	*out = *in
@@ -1203,6 +1843,11 @@ func (in *SNSEventSource) DeepCopyInto(out *SNSEventSource) {
 			(*out)[key] = val
 		}
 	}
+	if in.Filter != nil {
+		in, out := &in.Filter, &out.Filter
+		*out = new(EventSourceFilter)
+		**out = **in
+	}
 	return
 }
 
@@ -1236,6 +1881,16 @@ func (in *SQSEventSource) DeepCopyInto(out *SQSEventSource) {
 			(*out)[key] = val
 		}
 	}
+	if in.Filter != nil {
+		in, out := &in.Filter, &out.Filter
+		*out = new(EventSourceFilter)
+		**out = **in
+	}
+	if in.SessionToken != nil {
+		in, out := &in.SessionToken, &out.SessionToken
+		*out = new(v1.SecretKeySelector)
+		(*in).DeepCopyInto(*out)
+	}
 	return
 }
 
@@ -1313,6 +1968,11 @@ func (in *SlackEventSource) DeepCopyInto(out *SlackEventSource) {
 			(*out)[key] = val
 		}
 	}
+	if in.Filter != nil {
+		in, out := &in.Filter, &out.Filter
+		*out = new(EventSourceFilter)
+		**out = **in
+	}
 	return
 }
 
@@ -1531,6 +2191,11 @@ func (in *WebhookContext) DeepCopyInto(out *WebhookContext) {
 		*out = new(v1.SecretKeySelector)
 		(*in).DeepCopyInto(*out)
 	}
+	if in.MaxPayloadSize != nil {
+		in, out := &in.MaxPayloadSize, &out.MaxPayloadSize
+		*out = new(int64)
+		**out = **in
+	}
 	return
 }
 
@@ -1543,3 +2208,25 @@ func (in *WebhookContext) DeepCopy() *WebhookContext {
 	in.DeepCopyInto(out)
 	return out
 }
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebhookEventSource) DeepCopyInto(out *WebhookEventSource) {
+	*out = *in
+	in.WebhookContext.DeepCopyInto(&out.WebhookContext)
+	if in.Filter != nil {
+		in, out := &in.Filter, &out.Filter
+		*out = new(EventSourceFilter)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookEventSource.
+func (in *WebhookEventSource) DeepCopy() *WebhookEventSource {
+	if in == nil {
+		return nil
+	}
+	out := new(WebhookEventSource)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/pkg/apis/sensor/v1alpha1/generated.pb.go b/pkg/apis/sensor/v1alpha1/generated.pb.go
index dbb2b0b4fc..fb0bb83efe 100644
--- a/pkg/apis/sensor/v1alpha1/generated.pb.go
+++ b/pkg/apis/sensor/v1alpha1/generated.pb.go
@@ -1,5 +1,5 @@
 /*
-Copyright 2020 BlackRock, Inc.
+Copyright 2021 BlackRock, Inc.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -160,10 +160,94 @@ func (m *AzureEventHubsTrigger) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_AzureEventHubsTrigger proto.InternalMessageInfo
 
+func (m *AzureServiceBusTrigger) Reset()      { *m = AzureServiceBusTrigger{} }
+func (*AzureServiceBusTrigger) ProtoMessage() {}
+func (*AzureServiceBusTrigger) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c4bded897df1f16, []int{4}
+}
+func (m *AzureServiceBusTrigger) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AzureServiceBusTrigger) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *AzureServiceBusTrigger) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AzureServiceBusTrigger.Merge(m, src)
+}
+func (m *AzureServiceBusTrigger) XXX_Size() int {
+	return m.Size()
+}
+func (m *AzureServiceBusTrigger) XXX_DiscardUnknown() {
+	xxx_messageInfo_AzureServiceBusTrigger.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AzureServiceBusTrigger proto.InternalMessageInfo
+
+func (m *ConditionsResetByTime) Reset()      { *m = ConditionsResetByTime{} }
+func (*ConditionsResetByTime) ProtoMessage() {}
+func (*ConditionsResetByTime) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c4bded897df1f16, []int{5}
+}
+func (m *ConditionsResetByTime) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ConditionsResetByTime) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ConditionsResetByTime) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ConditionsResetByTime.Merge(m, src)
+}
+func (m *ConditionsResetByTime) XXX_Size() int {
+	return m.Size()
+}
+func (m *ConditionsResetByTime) XXX_DiscardUnknown() {
+	xxx_messageInfo_ConditionsResetByTime.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConditionsResetByTime proto.InternalMessageInfo
+
+func (m *ConditionsResetCriteria) Reset()      { *m = ConditionsResetCriteria{} }
+func (*ConditionsResetCriteria) ProtoMessage() {}
+func (*ConditionsResetCriteria) Descriptor() ([]byte, []int) {
+	return fileDescriptor_6c4bded897df1f16, []int{6}
+}
+func (m *ConditionsResetCriteria) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ConditionsResetCriteria) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *ConditionsResetCriteria) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ConditionsResetCriteria.Merge(m, src)
+}
+func (m *ConditionsResetCriteria) XXX_Size() int {
+	return m.Size()
+}
+func (m *ConditionsResetCriteria) XXX_DiscardUnknown() {
+	xxx_messageInfo_ConditionsResetCriteria.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConditionsResetCriteria proto.InternalMessageInfo
proto.InternalMessageInfo + func (m *CustomTrigger) Reset() { *m = CustomTrigger{} } func (*CustomTrigger) ProtoMessage() {} func (*CustomTrigger) Descriptor() ([]byte, []int) { - return fileDescriptor_6c4bded897df1f16, []int{4} + return fileDescriptor_6c4bded897df1f16, []int{7} } func (m *CustomTrigger) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -191,7 +275,7 @@ var xxx_messageInfo_CustomTrigger proto.InternalMessageInfo func (m *DataFilter) Reset() { *m = DataFilter{} } func (*DataFilter) ProtoMessage() {} func (*DataFilter) Descriptor() ([]byte, []int) { - return fileDescriptor_6c4bded897df1f16, []int{5} + return fileDescriptor_6c4bded897df1f16, []int{8} } func (m *DataFilter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -216,15 +300,15 @@ func (m *DataFilter) XXX_DiscardUnknown() { var xxx_messageInfo_DataFilter proto.InternalMessageInfo -func (m *DependencyGroup) Reset() { *m = DependencyGroup{} } -func (*DependencyGroup) ProtoMessage() {} -func (*DependencyGroup) Descriptor() ([]byte, []int) { - return fileDescriptor_6c4bded897df1f16, []int{6} +func (m *EmailTrigger) Reset() { *m = EmailTrigger{} } +func (*EmailTrigger) ProtoMessage() {} +func (*EmailTrigger) Descriptor() ([]byte, []int) { + return fileDescriptor_6c4bded897df1f16, []int{9} } -func (m *DependencyGroup) XXX_Unmarshal(b []byte) error { +func (m *EmailTrigger) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *DependencyGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *EmailTrigger) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { @@ -232,22 +316,22 @@ func (m *DependencyGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, err } return b[:n], nil } -func (m *DependencyGroup) XXX_Merge(src proto.Message) { - xxx_messageInfo_DependencyGroup.Merge(m, src) +func (m *EmailTrigger) XXX_Merge(src proto.Message) { + xxx_messageInfo_EmailTrigger.Merge(m, src) } -func (m *DependencyGroup) XXX_Size() int { +func (m *EmailTrigger) XXX_Size() int { return m.Size() } -func (m *DependencyGroup) XXX_DiscardUnknown() { - xxx_messageInfo_DependencyGroup.DiscardUnknown(m) +func (m *EmailTrigger) XXX_DiscardUnknown() { + xxx_messageInfo_EmailTrigger.DiscardUnknown(m) } -var xxx_messageInfo_DependencyGroup proto.InternalMessageInfo +var xxx_messageInfo_EmailTrigger proto.InternalMessageInfo func (m *Event) Reset() { *m = Event{} } func (*Event) ProtoMessage() {} func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_6c4bded897df1f16, []int{7} + return fileDescriptor_6c4bded897df1f16, []int{10} } func (m *Event) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -275,7 +359,7 @@ var xxx_messageInfo_Event proto.InternalMessageInfo func (m *EventContext) Reset() { *m = EventContext{} } func (*EventContext) ProtoMessage() {} func (*EventContext) Descriptor() ([]byte, []int) { - return fileDescriptor_6c4bded897df1f16, []int{8} + return fileDescriptor_6c4bded897df1f16, []int{11} } func (m *EventContext) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -303,7 +387,7 @@ var xxx_messageInfo_EventContext proto.InternalMessageInfo func (m *EventDependency) Reset() { *m = EventDependency{} } func (*EventDependency) ProtoMessage() {} func (*EventDependency) Descriptor() ([]byte, []int) { - return fileDescriptor_6c4bded897df1f16, []int{9} + return fileDescriptor_6c4bded897df1f16, []int{12} } func (m *EventDependency) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -331,7 
+415,7 @@ var xxx_messageInfo_EventDependency proto.InternalMessageInfo func (m *EventDependencyFilter) Reset() { *m = EventDependencyFilter{} } func (*EventDependencyFilter) ProtoMessage() {} func (*EventDependencyFilter) Descriptor() ([]byte, []int) { - return fileDescriptor_6c4bded897df1f16, []int{10} + return fileDescriptor_6c4bded897df1f16, []int{13} } func (m *EventDependencyFilter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -356,10 +440,38 @@ func (m *EventDependencyFilter) XXX_DiscardUnknown() { var xxx_messageInfo_EventDependencyFilter proto.InternalMessageInfo +func (m *EventDependencyTransformer) Reset() { *m = EventDependencyTransformer{} } +func (*EventDependencyTransformer) ProtoMessage() {} +func (*EventDependencyTransformer) Descriptor() ([]byte, []int) { + return fileDescriptor_6c4bded897df1f16, []int{14} +} +func (m *EventDependencyTransformer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventDependencyTransformer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EventDependencyTransformer) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventDependencyTransformer.Merge(m, src) +} +func (m *EventDependencyTransformer) XXX_Size() int { + return m.Size() +} +func (m *EventDependencyTransformer) XXX_DiscardUnknown() { + xxx_messageInfo_EventDependencyTransformer.DiscardUnknown(m) +} + +var xxx_messageInfo_EventDependencyTransformer proto.InternalMessageInfo + func (m *ExprFilter) Reset() { *m = ExprFilter{} } func (*ExprFilter) ProtoMessage() {} func (*ExprFilter) Descriptor() ([]byte, []int) { - return fileDescriptor_6c4bded897df1f16, []int{11} + return fileDescriptor_6c4bded897df1f16, []int{15} } func (m *ExprFilter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -387,7 +499,7 @@ var xxx_messageInfo_ExprFilter proto.InternalMessageInfo func (m *FileArtifact) Reset() { *m = FileArtifact{} } func (*FileArtifact) ProtoMessage() {} func (*FileArtifact) Descriptor() ([]byte, []int) { - return fileDescriptor_6c4bded897df1f16, []int{12} + return fileDescriptor_6c4bded897df1f16, []int{16} } func (m *FileArtifact) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -415,7 +527,7 @@ var xxx_messageInfo_FileArtifact proto.InternalMessageInfo func (m *GitArtifact) Reset() { *m = GitArtifact{} } func (*GitArtifact) ProtoMessage() {} func (*GitArtifact) Descriptor() ([]byte, []int) { - return fileDescriptor_6c4bded897df1f16, []int{13} + return fileDescriptor_6c4bded897df1f16, []int{17} } func (m *GitArtifact) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -443,7 +555,7 @@ var xxx_messageInfo_GitArtifact proto.InternalMessageInfo func (m *GitCreds) Reset() { *m = GitCreds{} } func (*GitCreds) ProtoMessage() {} func (*GitCreds) Descriptor() ([]byte, []int) { - return fileDescriptor_6c4bded897df1f16, []int{14} + return fileDescriptor_6c4bded897df1f16, []int{18} } func (m *GitCreds) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -471,7 +583,7 @@ var xxx_messageInfo_GitCreds proto.InternalMessageInfo func (m *GitRemoteConfig) Reset() { *m = GitRemoteConfig{} } func (*GitRemoteConfig) ProtoMessage() {} func (*GitRemoteConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_6c4bded897df1f16, []int{15} + return fileDescriptor_6c4bded897df1f16, []int{19} } func (m *GitRemoteConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -499,7 +611,7 @@ var 
xxx_messageInfo_GitRemoteConfig proto.InternalMessageInfo func (m *HTTPTrigger) Reset() { *m = HTTPTrigger{} } func (*HTTPTrigger) ProtoMessage() {} func (*HTTPTrigger) Descriptor() ([]byte, []int) { - return fileDescriptor_6c4bded897df1f16, []int{16} + return fileDescriptor_6c4bded897df1f16, []int{20} } func (m *HTTPTrigger) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -527,7 +639,7 @@ var xxx_messageInfo_HTTPTrigger proto.InternalMessageInfo func (m *K8SResourcePolicy) Reset() { *m = K8SResourcePolicy{} } func (*K8SResourcePolicy) ProtoMessage() {} func (*K8SResourcePolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_6c4bded897df1f16, []int{17} + return fileDescriptor_6c4bded897df1f16, []int{21} } func (m *K8SResourcePolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -555,7 +667,7 @@ var xxx_messageInfo_K8SResourcePolicy proto.InternalMessageInfo func (m *KafkaTrigger) Reset() { *m = KafkaTrigger{} } func (*KafkaTrigger) ProtoMessage() {} func (*KafkaTrigger) Descriptor() ([]byte, []int) { - return fileDescriptor_6c4bded897df1f16, []int{18} + return fileDescriptor_6c4bded897df1f16, []int{22} } func (m *KafkaTrigger) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -583,7 +695,7 @@ var xxx_messageInfo_KafkaTrigger proto.InternalMessageInfo func (m *LogTrigger) Reset() { *m = LogTrigger{} } func (*LogTrigger) ProtoMessage() {} func (*LogTrigger) Descriptor() ([]byte, []int) { - return fileDescriptor_6c4bded897df1f16, []int{19} + return fileDescriptor_6c4bded897df1f16, []int{23} } func (m *LogTrigger) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -611,7 +723,7 @@ var xxx_messageInfo_LogTrigger proto.InternalMessageInfo func (m *NATSTrigger) Reset() { *m = NATSTrigger{} } func (*NATSTrigger) ProtoMessage() {} func (*NATSTrigger) Descriptor() ([]byte, []int) { - return fileDescriptor_6c4bded897df1f16, []int{20} + return fileDescriptor_6c4bded897df1f16, []int{24} } func (m *NATSTrigger) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -639,7 +751,7 @@ var xxx_messageInfo_NATSTrigger proto.InternalMessageInfo func (m *OpenWhiskTrigger) Reset() { *m = OpenWhiskTrigger{} } func (*OpenWhiskTrigger) ProtoMessage() {} func (*OpenWhiskTrigger) Descriptor() ([]byte, []int) { - return fileDescriptor_6c4bded897df1f16, []int{21} + return fileDescriptor_6c4bded897df1f16, []int{25} } func (m *OpenWhiskTrigger) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -667,7 +779,7 @@ var xxx_messageInfo_OpenWhiskTrigger proto.InternalMessageInfo func (m *PayloadField) Reset() { *m = PayloadField{} } func (*PayloadField) ProtoMessage() {} func (*PayloadField) Descriptor() ([]byte, []int) { - return fileDescriptor_6c4bded897df1f16, []int{22} + return fileDescriptor_6c4bded897df1f16, []int{26} } func (m *PayloadField) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -692,10 +804,66 @@ func (m *PayloadField) XXX_DiscardUnknown() { var xxx_messageInfo_PayloadField proto.InternalMessageInfo +func (m *PulsarTrigger) Reset() { *m = PulsarTrigger{} } +func (*PulsarTrigger) ProtoMessage() {} +func (*PulsarTrigger) Descriptor() ([]byte, []int) { + return fileDescriptor_6c4bded897df1f16, []int{27} +} +func (m *PulsarTrigger) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PulsarTrigger) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PulsarTrigger) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_PulsarTrigger.Merge(m, src) +} +func (m *PulsarTrigger) XXX_Size() int { + return m.Size() +} +func (m *PulsarTrigger) XXX_DiscardUnknown() { + xxx_messageInfo_PulsarTrigger.DiscardUnknown(m) +} + +var xxx_messageInfo_PulsarTrigger proto.InternalMessageInfo + +func (m *RateLimit) Reset() { *m = RateLimit{} } +func (*RateLimit) ProtoMessage() {} +func (*RateLimit) Descriptor() ([]byte, []int) { + return fileDescriptor_6c4bded897df1f16, []int{28} +} +func (m *RateLimit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RateLimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RateLimit) XXX_Merge(src proto.Message) { + xxx_messageInfo_RateLimit.Merge(m, src) +} +func (m *RateLimit) XXX_Size() int { + return m.Size() +} +func (m *RateLimit) XXX_DiscardUnknown() { + xxx_messageInfo_RateLimit.DiscardUnknown(m) +} + +var xxx_messageInfo_RateLimit proto.InternalMessageInfo + func (m *Sensor) Reset() { *m = Sensor{} } func (*Sensor) ProtoMessage() {} func (*Sensor) Descriptor() ([]byte, []int) { - return fileDescriptor_6c4bded897df1f16, []int{23} + return fileDescriptor_6c4bded897df1f16, []int{29} } func (m *Sensor) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -723,7 +891,7 @@ var xxx_messageInfo_Sensor proto.InternalMessageInfo func (m *SensorList) Reset() { *m = SensorList{} } func (*SensorList) ProtoMessage() {} func (*SensorList) Descriptor() ([]byte, []int) { - return fileDescriptor_6c4bded897df1f16, []int{24} + return fileDescriptor_6c4bded897df1f16, []int{30} } func (m *SensorList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -751,7 +919,7 @@ var xxx_messageInfo_SensorList proto.InternalMessageInfo func (m *SensorSpec) Reset() { *m = SensorSpec{} } func (*SensorSpec) ProtoMessage() {} func (*SensorSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_6c4bded897df1f16, []int{25} + return fileDescriptor_6c4bded897df1f16, []int{31} } func (m *SensorSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -779,7 +947,7 @@ var xxx_messageInfo_SensorSpec proto.InternalMessageInfo func (m *SensorStatus) Reset() { *m = SensorStatus{} } func (*SensorStatus) ProtoMessage() {} func (*SensorStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_6c4bded897df1f16, []int{26} + return fileDescriptor_6c4bded897df1f16, []int{32} } func (m *SensorStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -804,10 +972,66 @@ func (m *SensorStatus) XXX_DiscardUnknown() { var xxx_messageInfo_SensorStatus proto.InternalMessageInfo +func (m *SlackSender) Reset() { *m = SlackSender{} } +func (*SlackSender) ProtoMessage() {} +func (*SlackSender) Descriptor() ([]byte, []int) { + return fileDescriptor_6c4bded897df1f16, []int{33} +} +func (m *SlackSender) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SlackSender) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SlackSender) XXX_Merge(src proto.Message) { + xxx_messageInfo_SlackSender.Merge(m, src) +} +func (m *SlackSender) XXX_Size() int { + return m.Size() +} +func (m *SlackSender) XXX_DiscardUnknown() { + xxx_messageInfo_SlackSender.DiscardUnknown(m) +} + +var xxx_messageInfo_SlackSender proto.InternalMessageInfo + +func (m *SlackThread) Reset() { *m = SlackThread{} } +func 
(*SlackThread) ProtoMessage() {} +func (*SlackThread) Descriptor() ([]byte, []int) { + return fileDescriptor_6c4bded897df1f16, []int{34} +} +func (m *SlackThread) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SlackThread) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SlackThread) XXX_Merge(src proto.Message) { + xxx_messageInfo_SlackThread.Merge(m, src) +} +func (m *SlackThread) XXX_Size() int { + return m.Size() +} +func (m *SlackThread) XXX_DiscardUnknown() { + xxx_messageInfo_SlackThread.DiscardUnknown(m) +} + +var xxx_messageInfo_SlackThread proto.InternalMessageInfo + func (m *SlackTrigger) Reset() { *m = SlackTrigger{} } func (*SlackTrigger) ProtoMessage() {} func (*SlackTrigger) Descriptor() ([]byte, []int) { - return fileDescriptor_6c4bded897df1f16, []int{27} + return fileDescriptor_6c4bded897df1f16, []int{35} } func (m *SlackTrigger) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -835,7 +1059,7 @@ var xxx_messageInfo_SlackTrigger proto.InternalMessageInfo func (m *StandardK8STrigger) Reset() { *m = StandardK8STrigger{} } func (*StandardK8STrigger) ProtoMessage() {} func (*StandardK8STrigger) Descriptor() ([]byte, []int) { - return fileDescriptor_6c4bded897df1f16, []int{28} + return fileDescriptor_6c4bded897df1f16, []int{36} } func (m *StandardK8STrigger) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -863,7 +1087,7 @@ var xxx_messageInfo_StandardK8STrigger proto.InternalMessageInfo func (m *StatusPolicy) Reset() { *m = StatusPolicy{} } func (*StatusPolicy) ProtoMessage() {} func (*StatusPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_6c4bded897df1f16, []int{29} + return fileDescriptor_6c4bded897df1f16, []int{37} } func (m *StatusPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -891,7 +1115,7 @@ var xxx_messageInfo_StatusPolicy proto.InternalMessageInfo func (m *Template) Reset() { *m = Template{} } func (*Template) ProtoMessage() {} func (*Template) Descriptor() ([]byte, []int) { - return fileDescriptor_6c4bded897df1f16, []int{30} + return fileDescriptor_6c4bded897df1f16, []int{38} } func (m *Template) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -919,7 +1143,7 @@ var xxx_messageInfo_Template proto.InternalMessageInfo func (m *TimeFilter) Reset() { *m = TimeFilter{} } func (*TimeFilter) ProtoMessage() {} func (*TimeFilter) Descriptor() ([]byte, []int) { - return fileDescriptor_6c4bded897df1f16, []int{31} + return fileDescriptor_6c4bded897df1f16, []int{39} } func (m *TimeFilter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -947,7 +1171,7 @@ var xxx_messageInfo_TimeFilter proto.InternalMessageInfo func (m *Trigger) Reset() { *m = Trigger{} } func (*Trigger) ProtoMessage() {} func (*Trigger) Descriptor() ([]byte, []int) { - return fileDescriptor_6c4bded897df1f16, []int{32} + return fileDescriptor_6c4bded897df1f16, []int{40} } func (m *Trigger) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -975,7 +1199,7 @@ var xxx_messageInfo_Trigger proto.InternalMessageInfo func (m *TriggerParameter) Reset() { *m = TriggerParameter{} } func (*TriggerParameter) ProtoMessage() {} func (*TriggerParameter) Descriptor() ([]byte, []int) { - return fileDescriptor_6c4bded897df1f16, []int{33} + return fileDescriptor_6c4bded897df1f16, []int{41} } func (m *TriggerParameter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1003,7 +1227,7 @@ var 
xxx_messageInfo_TriggerParameter proto.InternalMessageInfo func (m *TriggerParameterSource) Reset() { *m = TriggerParameterSource{} } func (*TriggerParameterSource) ProtoMessage() {} func (*TriggerParameterSource) Descriptor() ([]byte, []int) { - return fileDescriptor_6c4bded897df1f16, []int{34} + return fileDescriptor_6c4bded897df1f16, []int{42} } func (m *TriggerParameterSource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1031,7 +1255,7 @@ var xxx_messageInfo_TriggerParameterSource proto.InternalMessageInfo func (m *TriggerPolicy) Reset() { *m = TriggerPolicy{} } func (*TriggerPolicy) ProtoMessage() {} func (*TriggerPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_6c4bded897df1f16, []int{35} + return fileDescriptor_6c4bded897df1f16, []int{43} } func (m *TriggerPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1056,38 +1280,10 @@ func (m *TriggerPolicy) XXX_DiscardUnknown() { var xxx_messageInfo_TriggerPolicy proto.InternalMessageInfo -func (m *TriggerSwitch) Reset() { *m = TriggerSwitch{} } -func (*TriggerSwitch) ProtoMessage() {} -func (*TriggerSwitch) Descriptor() ([]byte, []int) { - return fileDescriptor_6c4bded897df1f16, []int{36} -} -func (m *TriggerSwitch) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TriggerSwitch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *TriggerSwitch) XXX_Merge(src proto.Message) { - xxx_messageInfo_TriggerSwitch.Merge(m, src) -} -func (m *TriggerSwitch) XXX_Size() int { - return m.Size() -} -func (m *TriggerSwitch) XXX_DiscardUnknown() { - xxx_messageInfo_TriggerSwitch.DiscardUnknown(m) -} - -var xxx_messageInfo_TriggerSwitch proto.InternalMessageInfo - func (m *TriggerTemplate) Reset() { *m = TriggerTemplate{} } func (*TriggerTemplate) ProtoMessage() {} func (*TriggerTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_6c4bded897df1f16, []int{37} + return fileDescriptor_6c4bded897df1f16, []int{44} } func (m *TriggerTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1115,7 +1311,7 @@ var xxx_messageInfo_TriggerTemplate proto.InternalMessageInfo func (m *URLArtifact) Reset() { *m = URLArtifact{} } func (*URLArtifact) ProtoMessage() {} func (*URLArtifact) Descriptor() ([]byte, []int) { - return fileDescriptor_6c4bded897df1f16, []int{38} + return fileDescriptor_6c4bded897df1f16, []int{45} } func (m *URLArtifact) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1145,14 +1341,18 @@ func init() { proto.RegisterType((*ArgoWorkflowTrigger)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.ArgoWorkflowTrigger") proto.RegisterType((*ArtifactLocation)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.ArtifactLocation") proto.RegisterType((*AzureEventHubsTrigger)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.AzureEventHubsTrigger") + proto.RegisterType((*AzureServiceBusTrigger)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.AzureServiceBusTrigger") + proto.RegisterType((*ConditionsResetByTime)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.ConditionsResetByTime") + proto.RegisterType((*ConditionsResetCriteria)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.ConditionsResetCriteria") proto.RegisterType((*CustomTrigger)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.CustomTrigger") 
proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.CustomTrigger.SpecEntry") proto.RegisterType((*DataFilter)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.DataFilter") - proto.RegisterType((*DependencyGroup)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.DependencyGroup") + proto.RegisterType((*EmailTrigger)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.EmailTrigger") proto.RegisterType((*Event)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.Event") proto.RegisterType((*EventContext)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.EventContext") proto.RegisterType((*EventDependency)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.EventDependency") proto.RegisterType((*EventDependencyFilter)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.EventDependencyFilter") + proto.RegisterType((*EventDependencyTransformer)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.EventDependencyTransformer") proto.RegisterType((*ExprFilter)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.ExprFilter") proto.RegisterType((*FileArtifact)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.FileArtifact") proto.RegisterType((*GitArtifact)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.GitArtifact") @@ -1167,10 +1367,16 @@ func init() { proto.RegisterType((*NATSTrigger)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.NATSTrigger") proto.RegisterType((*OpenWhiskTrigger)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.OpenWhiskTrigger") proto.RegisterType((*PayloadField)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.PayloadField") + proto.RegisterType((*PulsarTrigger)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.PulsarTrigger") + proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.PulsarTrigger.AuthAthenzParamsEntry") + proto.RegisterType((*RateLimit)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.RateLimit") proto.RegisterType((*Sensor)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.Sensor") proto.RegisterType((*SensorList)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.SensorList") proto.RegisterType((*SensorSpec)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.SensorSpec") + proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.SensorSpec.LoggingFieldsEntry") proto.RegisterType((*SensorStatus)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.SensorStatus") + proto.RegisterType((*SlackSender)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.SlackSender") + proto.RegisterType((*SlackThread)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.SlackThread") proto.RegisterType((*SlackTrigger)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.SlackTrigger") proto.RegisterType((*StandardK8STrigger)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.StandardK8STrigger") proto.RegisterType((*StatusPolicy)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.StatusPolicy") @@ -1181,7 +1387,6 @@ func init() { proto.RegisterType((*TriggerParameter)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.TriggerParameter") proto.RegisterType((*TriggerParameterSource)(nil), 
"github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.TriggerParameterSource") proto.RegisterType((*TriggerPolicy)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.TriggerPolicy") - proto.RegisterType((*TriggerSwitch)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.TriggerSwitch") proto.RegisterType((*TriggerTemplate)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.TriggerTemplate") proto.RegisterType((*URLArtifact)(nil), "github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1.URLArtifact") } @@ -1191,256 +1396,318 @@ func init() { } var fileDescriptor_6c4bded897df1f16 = []byte{ - // 3984 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x3b, 0x4b, 0x6c, 0x23, 0xc9, - 0x75, 0xc3, 0xaf, 0xc8, 0x27, 0xea, 0x33, 0x35, 0x33, 0x6b, 0xae, 0xb2, 0x2b, 0x0e, 0x68, 0xc4, - 0x19, 0x1b, 0x36, 0xb5, 0xbb, 0xe3, 0xc4, 0xf2, 0x24, 0xb1, 0x97, 0xd4, 0x67, 0x3e, 0xd2, 0x8c, - 0xb4, 0xd5, 0xd2, 0x2e, 0xf2, 0x01, 0xd6, 0xad, 0x66, 0x91, 0xec, 0x55, 0xb3, 0xbb, 0xb7, 0xab, - 0xa8, 0x59, 0x06, 0x48, 0x62, 0xc0, 0xc8, 0x21, 0x48, 0x10, 0xe7, 0x98, 0x53, 0x92, 0x4b, 0x6e, - 0xb9, 0xe5, 0x18, 0x24, 0x07, 0x9f, 0xf6, 0x16, 0xe7, 0x10, 0xc0, 0x87, 0x80, 0xc8, 0xca, 0x97, - 0xe4, 0x10, 0x18, 0x0b, 0xe4, 0x34, 0x97, 0x04, 0xf5, 0xeb, 0xae, 0x6e, 0x72, 0x32, 0xd2, 0x70, - 0xa0, 0x39, 0xf8, 0xc6, 0x7e, 0xef, 0xd5, 0x7b, 0x55, 0xaf, 0x5e, 0xbd, 0x5f, 0x15, 0xe1, 0x41, - 0xdf, 0x65, 0x83, 0xd1, 0x49, 0xcb, 0x09, 0x86, 0x1b, 0x76, 0xd4, 0x0f, 0xc2, 0x28, 0xf8, 0x44, - 0xfc, 0xf8, 0x16, 0x39, 0x23, 0x3e, 0xa3, 0x1b, 0xe1, 0x69, 0x7f, 0xc3, 0x0e, 0x5d, 0xba, 0x41, - 0x89, 0x4f, 0x83, 0x68, 0xe3, 0xec, 0x5d, 0xdb, 0x0b, 0x07, 0xf6, 0xbb, 0x1b, 0x7d, 0xe2, 0x93, - 0xc8, 0x66, 0xa4, 0xdb, 0x0a, 0xa3, 0x80, 0x05, 0x68, 0x33, 0xe1, 0xd4, 0xd2, 0x9c, 0xc4, 0x8f, - 0x8f, 0x25, 0xa7, 0x56, 0x78, 0xda, 0x6f, 0x71, 0x4e, 0x2d, 0xc9, 0xa9, 0xa5, 0x39, 0xad, 0x7d, - 0xff, 0xc2, 0x73, 0x70, 0x82, 0xe1, 0x30, 0xf0, 0xb3, 0xa2, 0xd7, 0xbe, 0x65, 0x30, 0xe8, 0x07, - 0xfd, 0x60, 0x43, 0x80, 0x4f, 0x46, 0x3d, 0xf1, 0x25, 0x3e, 0xc4, 0x2f, 0x45, 0xde, 0x3c, 0xdd, - 0xa4, 0x2d, 0x37, 0xe0, 0x2c, 0x37, 0x9c, 0x20, 0x22, 0x1b, 0x67, 0x53, 0xab, 0x59, 0xfb, 0x76, - 0x42, 0x33, 0xb4, 0x9d, 0x81, 0xeb, 0x93, 0x68, 0x9c, 0xcc, 0x63, 0x48, 0x98, 0x3d, 0x6b, 0xd4, - 0xc6, 0xf3, 0x46, 0x45, 0x23, 0x9f, 0xb9, 0x43, 0x32, 0x35, 0xe0, 0x37, 0x5e, 0x34, 0x80, 0x3a, - 0x03, 0x32, 0xb4, 0xb3, 0xe3, 0x9a, 0xff, 0x52, 0x84, 0xd5, 0xf6, 0x47, 0xd6, 0xbe, 0x3d, 0x3c, - 0xe9, 0xda, 0x47, 0x91, 0xdb, 0xef, 0x93, 0x08, 0x6d, 0x42, 0xad, 0x37, 0xf2, 0x1d, 0xe6, 0x06, - 0xfe, 0x13, 0x7b, 0x48, 0xea, 0xb9, 0xdb, 0xb9, 0x3b, 0xd5, 0xce, 0xcd, 0xcf, 0x27, 0x8d, 0x6b, - 0xe7, 0x93, 0x46, 0x6d, 0xd7, 0xc0, 0xe1, 0x14, 0x25, 0xc2, 0x50, 0xb5, 0x1d, 0x87, 0x50, 0xba, - 0x47, 0xc6, 0xf5, 0xfc, 0xed, 0xdc, 0x9d, 0xc5, 0xf7, 0x7e, 0xb5, 0x25, 0xa7, 0xc6, 0xb7, 0xac, - 0xc5, 0xb5, 0xd4, 0x3a, 0x7b, 0xb7, 0x65, 0x11, 0x27, 0x22, 0x6c, 0x8f, 0x8c, 0x2d, 0xe2, 0x11, - 0x87, 0x05, 0x51, 0x67, 0xe9, 0x7c, 0xd2, 0xa8, 0xb6, 0xf5, 0x58, 0x9c, 0xb0, 0xe1, 0x3c, 0xa9, - 0x26, 0xaf, 0x17, 0x2e, 0xcd, 0x33, 0x06, 0xe3, 0x84, 0x0d, 0xfa, 0x1a, 0x94, 0x23, 0xd2, 0x77, - 0x03, 0xbf, 0x5e, 0x14, 0x6b, 0x5b, 0x56, 0x6b, 0x2b, 0x63, 0x01, 0xc5, 0x0a, 0x8b, 0x46, 0xb0, - 0x10, 0xda, 0x63, 0x2f, 0xb0, 0xbb, 0xf5, 0xd2, 0xed, 0xc2, 0x9d, 0xc5, 0xf7, 0x1e, 0xb5, 0x5e, - 0xd6, 0x3a, 0x5b, 0x4a, 0xbb, 0x87, 0x76, 0x64, 0x0f, 0x09, 0x23, 0x51, 0x67, 0x45, 0x09, 0x5d, - 0x38, 0x94, 0x22, 
0xb0, 0x96, 0x85, 0xfe, 0x08, 0x20, 0xd4, 0x64, 0xb4, 0x5e, 0x7e, 0xe5, 0x92, - 0x91, 0x92, 0x0c, 0x31, 0x88, 0x62, 0x43, 0x22, 0xba, 0x07, 0xcb, 0xae, 0x7f, 0x16, 0x38, 0x36, - 0xdf, 0xd8, 0xa3, 0x71, 0x48, 0xea, 0x0b, 0x42, 0x4d, 0xe8, 0x7c, 0xd2, 0x58, 0x7e, 0x98, 0xc2, - 0xe0, 0x0c, 0x65, 0x73, 0x52, 0x80, 0x1b, 0xed, 0xa8, 0x1f, 0x7c, 0x14, 0x44, 0xa7, 0x3d, 0x2f, - 0x78, 0xaa, 0x8d, 0xca, 0x87, 0x32, 0x0d, 0x46, 0x91, 0x23, 0xcd, 0x69, 0xae, 0xf5, 0xb4, 0x23, - 0xe6, 0xf6, 0x6c, 0x87, 0xed, 0x2b, 0xb9, 0x1d, 0xe0, 0x5b, 0x67, 0x09, 0xee, 0x58, 0x49, 0x41, - 0x0f, 0xa0, 0x1a, 0x84, 0xdc, 0xd6, 0xf9, 0x2e, 0xe7, 0xc5, 0xf4, 0xbf, 0xa1, 0x96, 0x5d, 0x3d, - 0xd0, 0x88, 0x67, 0x93, 0xc6, 0x2d, 0x73, 0xb2, 0x31, 0x02, 0x27, 0x83, 0x33, 0xbb, 0x51, 0xb8, - 0xf2, 0xdd, 0xf8, 0xf3, 0x1c, 0xdc, 0xec, 0x47, 0xc1, 0x28, 0xfc, 0x90, 0x44, 0x94, 0xcf, 0x8d, - 0x28, 0x45, 0x16, 0x85, 0x22, 0xef, 0x19, 0x87, 0x21, 0x3e, 0xfb, 0x89, 0x78, 0xee, 0x62, 0xf8, - 0xf1, 0xb8, 0x3f, 0x83, 0x43, 0xe7, 0x2d, 0x25, 0xfa, 0xe6, 0x2c, 0x2c, 0x9e, 0x29, 0xb5, 0xf9, - 0x25, 0x77, 0x19, 0x99, 0x1d, 0x40, 0x16, 0xe4, 0xe9, 0x5d, 0xb5, 0xb3, 0xbf, 0x79, 0x71, 0xdd, - 0x48, 0x3f, 0xdc, 0xb2, 0xee, 0x6a, 0x86, 0x9d, 0xf2, 0xf9, 0xa4, 0x91, 0xb7, 0xee, 0xe2, 0x3c, - 0xbd, 0x8b, 0x9a, 0x50, 0x76, 0x7d, 0xcf, 0xf5, 0x89, 0xda, 0x3f, 0xb1, 0xcd, 0x0f, 0x05, 0x04, - 0x2b, 0x0c, 0xea, 0x42, 0xb1, 0xe7, 0x7a, 0x44, 0x39, 0x86, 0xdd, 0x97, 0xdf, 0x96, 0x5d, 0xd7, - 0x23, 0xf1, 0x2c, 0x2a, 0xe7, 0x93, 0x46, 0x91, 0x43, 0xb0, 0xe0, 0x8e, 0x7e, 0x00, 0x85, 0x51, - 0xe4, 0x29, 0x85, 0xef, 0xbc, 0xbc, 0x90, 0x63, 0xbc, 0x1f, 0xcb, 0x58, 0x38, 0x9f, 0x34, 0x0a, - 0xc7, 0x78, 0x1f, 0x73, 0xd6, 0xe8, 0x18, 0xaa, 0x4e, 0xe0, 0xf7, 0xdc, 0xfe, 0xd0, 0x0e, 0xeb, - 0x25, 0x21, 0xe7, 0xce, 0x2c, 0x2f, 0xb7, 0x25, 0x88, 0x1e, 0xdb, 0xe1, 0x94, 0xa3, 0xdb, 0xd2, - 0xc3, 0x71, 0xc2, 0x89, 0x4f, 0xbc, 0xef, 0xb2, 0x7a, 0x79, 0xde, 0x89, 0xdf, 0x77, 0x59, 0x7a, - 0xe2, 0xf7, 0x5d, 0x86, 0x39, 0x6b, 0xe4, 0x40, 0x25, 0xd2, 0x06, 0xb9, 0x20, 0xc4, 0x7c, 0xf7, - 0xd2, 0xfb, 0x1f, 0xdb, 0x63, 0xed, 0x7c, 0xd2, 0xa8, 0xc4, 0xf6, 0x17, 0x33, 0x6e, 0xfe, 0x43, - 0x11, 0x6e, 0xb5, 0xff, 0x60, 0x14, 0x91, 0x1d, 0xce, 0xe0, 0xc1, 0xe8, 0x84, 0x6a, 0xb7, 0x72, - 0x1b, 0x8a, 0xbd, 0x4f, 0xbb, 0xbe, 0x8a, 0x51, 0x35, 0x65, 0xcf, 0xc5, 0xdd, 0x0f, 0xb6, 0x9f, - 0x60, 0x81, 0x41, 0x5f, 0x87, 0x85, 0xc1, 0xe8, 0x44, 0x04, 0x32, 0x69, 0x46, 0xb1, 0xdf, 0x7d, - 0x20, 0xc1, 0x58, 0xe3, 0x51, 0x08, 0x37, 0xe8, 0xc0, 0x8e, 0x48, 0x37, 0x0e, 0x44, 0x62, 0xd8, - 0xa5, 0x82, 0xce, 0x57, 0xce, 0x27, 0x8d, 0x1b, 0xd6, 0x34, 0x17, 0x3c, 0x8b, 0x35, 0xea, 0xc2, - 0x4a, 0x06, 0xac, 0x8c, 0xec, 0x82, 0xd2, 0x6e, 0x9c, 0x4f, 0x1a, 0x2b, 0x19, 0x69, 0x38, 0xcb, - 0xf2, 0x97, 0x34, 0x8c, 0x35, 0xff, 0xa7, 0x04, 0x4b, 0x5b, 0x23, 0xca, 0x82, 0xa1, 0xb6, 0x96, - 0x0d, 0x9e, 0x4b, 0x44, 0x67, 0x24, 0x3a, 0xc6, 0xfb, 0xca, 0x64, 0xae, 0xeb, 0xa0, 0x60, 0x69, - 0x04, 0x4e, 0x68, 0x78, 0xa2, 0x40, 0x89, 0x33, 0x8a, 0xa4, 0xed, 0x54, 0x92, 0x44, 0xc1, 0x12, - 0x50, 0xac, 0xb0, 0xe8, 0x18, 0xc0, 0x21, 0x11, 0x93, 0x1b, 0x74, 0x39, 0x83, 0x59, 0xe6, 0x2b, - 0xd8, 0x8a, 0x07, 0x63, 0x83, 0x11, 0x7a, 0x04, 0x48, 0xce, 0x85, 0x1b, 0xcb, 0xc1, 0x19, 0x89, - 0x22, 0xb7, 0x4b, 0x54, 0xce, 0xb2, 0xa6, 0xa6, 0x82, 0xac, 0x29, 0x0a, 0x3c, 0x63, 0x14, 0xa2, - 0x50, 0xa4, 0x21, 0x71, 0x94, 0x05, 0x7c, 0xf0, 0xf2, 0xfb, 0x90, 0x52, 0x69, 0xcb, 0x0a, 0x89, - 0xb3, 0xe3, 0xb3, 0x68, 0x9c, 0x1c, 0x3e, 0x0e, 0xc2, 0x42, 0xd8, 0x6b, 0xcf, 0x64, 0x0c, 0xcb, - 0x5f, 0xb8, 0x42, 0xcb, 0xef, 0x40, 0x8d, 
0xef, 0x22, 0x8f, 0x20, 0x87, 0x36, 0x1b, 0xd4, 0x2b, - 0x62, 0xc7, 0xd6, 0x15, 0xfd, 0x1b, 0xdb, 0x24, 0x8c, 0x88, 0xc3, 0xd3, 0xf0, 0x2d, 0x83, 0x0a, - 0xa7, 0xc6, 0xac, 0x7d, 0x07, 0xaa, 0xb1, 0x6e, 0xd1, 0x2a, 0x14, 0x4e, 0xc9, 0x58, 0x9a, 0x2c, - 0xe6, 0x3f, 0xd1, 0x4d, 0x28, 0x9d, 0xd9, 0xde, 0x48, 0x39, 0x35, 0x2c, 0x3f, 0xee, 0xe5, 0x37, - 0x73, 0xcd, 0xff, 0xce, 0x01, 0x6c, 0xdb, 0xcc, 0xde, 0x75, 0x3d, 0x26, 0x3d, 0x64, 0xc8, 0xe7, - 0x90, 0xf1, 0x90, 0x42, 0xa2, 0xc0, 0xa0, 0x6f, 0x42, 0x91, 0xf1, 0x24, 0x4f, 0xba, 0xc7, 0xba, - 0xa6, 0xe0, 0xe9, 0xdc, 0xb3, 0x49, 0xa3, 0xf2, 0xc8, 0x3a, 0x78, 0x22, 0x52, 0x3d, 0x41, 0x85, - 0x1a, 0x5a, 0x30, 0xcf, 0x84, 0xaa, 0x9d, 0xea, 0xf9, 0xa4, 0x51, 0xfa, 0x90, 0x03, 0xd4, 0x1c, - 0xd0, 0xfb, 0x00, 0x4e, 0x30, 0xe4, 0x9b, 0xc0, 0x82, 0x48, 0x19, 0xeb, 0x6d, 0xbd, 0x4f, 0x5b, - 0x31, 0xe6, 0x59, 0xea, 0x0b, 0x1b, 0x63, 0xd0, 0x37, 0xa1, 0xc2, 0xc8, 0x30, 0xf4, 0x6c, 0x46, - 0x44, 0x2c, 0xac, 0x76, 0x56, 0xd5, 0xf8, 0xca, 0x91, 0x82, 0xe3, 0x98, 0xa2, 0xe9, 0xc2, 0xca, - 0x36, 0x09, 0x89, 0xdf, 0x25, 0xbe, 0x33, 0x16, 0x89, 0x0c, 0x5f, 0xb3, 0x9f, 0x54, 0x2e, 0xf1, - 0x9a, 0x85, 0x1f, 0x16, 0x18, 0xf4, 0x6d, 0xa8, 0x75, 0xf5, 0x20, 0x97, 0xd0, 0x7a, 0x5e, 0x2c, - 0x66, 0x95, 0xd7, 0x37, 0xdb, 0x06, 0x1c, 0xa7, 0xa8, 0x9a, 0x7f, 0x9d, 0x83, 0x92, 0x08, 0x41, - 0x68, 0x08, 0x0b, 0x4e, 0xe0, 0x33, 0xf2, 0x19, 0x53, 0x59, 0xcf, 0x1c, 0xa9, 0x87, 0xe0, 0xb8, - 0x25, 0xb9, 0x75, 0x16, 0xb9, 0x41, 0xa9, 0x0f, 0xac, 0x65, 0xa0, 0xb7, 0xa0, 0xd8, 0xb5, 0x99, - 0x2d, 0xb6, 0xa8, 0x26, 0xd3, 0x13, 0xbe, 0xc5, 0x58, 0x40, 0xef, 0x55, 0xfe, 0xea, 0x6f, 0x1b, - 0xd7, 0x7e, 0xf8, 0xef, 0xb7, 0xaf, 0x35, 0xbf, 0xcc, 0x43, 0xcd, 0x64, 0x87, 0xd6, 0x20, 0xef, - 0x76, 0x95, 0x1e, 0x40, 0xe9, 0x21, 0xff, 0x70, 0x1b, 0xe7, 0xdd, 0xae, 0x70, 0x6e, 0x32, 0x70, - 0xe7, 0xd3, 0x55, 0x50, 0x26, 0x95, 0xfe, 0x75, 0x58, 0xe4, 0x87, 0xf9, 0x4c, 0x26, 0x82, 0xc2, - 0xbb, 0x55, 0x3b, 0x37, 0x14, 0xf1, 0x22, 0x37, 0x52, 0x9d, 0x23, 0x9a, 0x74, 0x7c, 0x13, 0x84, - 0x59, 0x15, 0xd3, 0x9b, 0x60, 0x98, 0x52, 0x1b, 0x56, 0xf8, 0xfc, 0xc5, 0x22, 0x7d, 0x26, 0x88, - 0xe5, 0x76, 0x7f, 0x45, 0x11, 0xaf, 0xf0, 0x45, 0x6e, 0x49, 0xb4, 0x18, 0x97, 0xa5, 0xe7, 0xd1, - 0x9d, 0x8e, 0x4e, 0x3e, 0x21, 0x8e, 0x4c, 0x72, 0x8c, 0xe8, 0x6e, 0x49, 0x30, 0xd6, 0x78, 0xb4, - 0x0f, 0x45, 0x5e, 0x0a, 0xab, 0x2c, 0xe5, 0x1b, 0x17, 0x4b, 0x9b, 0x8f, 0xdc, 0x21, 0x31, 0xe6, - 0xee, 0x72, 0x03, 0xe2, 0x5c, 0x0c, 0x9d, 0xff, 0x4d, 0x1e, 0x56, 0x84, 0xce, 0x13, 0x2b, 0xbc, - 0x80, 0x01, 0xb6, 0x61, 0x45, 0xd8, 0x85, 0xd4, 0xb5, 0x91, 0x9e, 0xc4, 0x6b, 0xdf, 0x49, 0xa3, - 0x71, 0x96, 0x9e, 0x47, 0x33, 0x01, 0x8a, 0x93, 0x14, 0x23, 0x9a, 0xed, 0x68, 0x04, 0x4e, 0x68, - 0xd0, 0x19, 0x2c, 0xf4, 0x84, 0x53, 0xa0, 0x2a, 0xcb, 0x38, 0x98, 0xd3, 0x68, 0x93, 0x15, 0x4b, - 0x67, 0x23, 0xad, 0x57, 0xfe, 0xa6, 0x58, 0x0b, 0x6b, 0xfe, 0x53, 0x01, 0x6e, 0xcd, 0xa4, 0x47, - 0x27, 0x6a, 0x4f, 0xe4, 0x19, 0xda, 0x9e, 0xc3, 0x39, 0xbb, 0x43, 0xa2, 0xe6, 0x50, 0x49, 0xef, - 0x94, 0x79, 0x54, 0xf3, 0x57, 0x70, 0x54, 0x7b, 0xea, 0xa8, 0xca, 0x42, 0x71, 0x8e, 0x25, 0x25, - 0x3e, 0x3c, 0x31, 0xa0, 0xe4, 0xd0, 0x23, 0x17, 0x4a, 0xe4, 0xb3, 0x50, 0x6c, 0xe5, 0x9c, 0x82, - 0x76, 0x3e, 0x0b, 0x23, 0x25, 0x68, 0x49, 0x09, 0x2a, 0x71, 0x18, 0xc5, 0x52, 0x02, 0x77, 0x7b, - 0x90, 0x10, 0x71, 0xe3, 0xe6, 0xf0, 0xac, 0x71, 0x73, 0x0a, 0x2c, 0x30, 0xbc, 0xd8, 0xef, 0xb9, - 0xc4, 0xeb, 0x4a, 0xbf, 0x3a, 0x97, 0xc6, 0x55, 0x6c, 0xdd, 0xe5, 0xec, 0x12, 0x0f, 0x25, 0x3e, - 0x29, 0x56, 0x52, 0x9a, 0xef, 0x40, 0xcd, 0xac, 0xdf, 0x5e, 0x1c, 
0xf3, 0x9a, 0xbf, 0x28, 0xc2, - 0xa2, 0x51, 0xd4, 0xa0, 0xb7, 0x65, 0x85, 0x27, 0x07, 0x2c, 0xaa, 0x01, 0x49, 0x79, 0xf6, 0x3d, - 0x58, 0x76, 0xbc, 0xc0, 0x27, 0xdb, 0x6e, 0x24, 0xb2, 0xb6, 0xb1, 0x3a, 0xac, 0x6f, 0x28, 0xca, - 0xe5, 0xad, 0x14, 0x16, 0x67, 0xa8, 0x91, 0x03, 0x25, 0x27, 0x22, 0x5d, 0xaa, 0x52, 0xc3, 0xce, - 0x5c, 0x95, 0xd8, 0x16, 0xe7, 0x24, 0x03, 0xaf, 0xf8, 0x89, 0x25, 0x6f, 0xf4, 0x7b, 0x50, 0xa3, - 0x74, 0x20, 0x72, 0x4b, 0x91, 0x86, 0x5e, 0xaa, 0x92, 0x10, 0xa1, 0xcf, 0xb2, 0x1e, 0xc4, 0xc3, - 0x71, 0x8a, 0x19, 0x8f, 0xc9, 0x3d, 0x9d, 0xce, 0x64, 0x62, 0x72, 0x9c, 0xc0, 0xc4, 0x14, 0x3c, - 0xb4, 0x9c, 0x44, 0xb6, 0xef, 0x0c, 0x94, 0x57, 0x8e, 0x37, 0xae, 0x23, 0xa0, 0x58, 0x61, 0xb9, - 0xda, 0x99, 0xdd, 0x57, 0xed, 0xa5, 0x58, 0xed, 0x47, 0x76, 0x1f, 0x73, 0x38, 0x47, 0x47, 0xa4, - 0xa7, 0xd2, 0xa7, 0x18, 0x8d, 0x49, 0x0f, 0x73, 0x38, 0x1a, 0x42, 0x39, 0x22, 0xc3, 0x80, 0x91, - 0x7a, 0x55, 0x2c, 0xf5, 0xe1, 0x5c, 0x6a, 0xc5, 0x82, 0x95, 0x2c, 0xa3, 0x65, 0xaf, 0x41, 0x42, - 0xb0, 0x12, 0x82, 0x7e, 0x0b, 0x40, 0xaa, 0x44, 0x28, 0x01, 0xc4, 0xa4, 0xe2, 0x0e, 0x4a, 0x92, - 0xd3, 0x49, 0x25, 0x0a, 0x85, 0x18, 0xf4, 0xcd, 0xbf, 0xcf, 0x41, 0x45, 0x6f, 0x1e, 0x3a, 0x80, - 0xca, 0x88, 0x92, 0x28, 0x8e, 0x11, 0x17, 0xde, 0x26, 0x51, 0x21, 0x1f, 0xab, 0xa1, 0x38, 0x66, - 0xc2, 0x19, 0x86, 0x36, 0xa5, 0x4f, 0x83, 0xa8, 0x7b, 0xb9, 0xc6, 0xab, 0x60, 0x78, 0xa8, 0x86, - 0xe2, 0x98, 0x49, 0xf3, 0x03, 0x58, 0xc9, 0xe8, 0xe4, 0x02, 0x41, 0xed, 0x2d, 0x28, 0x8e, 0x22, - 0x4f, 0x67, 0x53, 0xc2, 0x11, 0x1f, 0xe3, 0x7d, 0x0b, 0x0b, 0x68, 0xf3, 0xbf, 0xca, 0xb0, 0xf8, - 0xe0, 0xe8, 0xe8, 0x50, 0x57, 0x63, 0x2f, 0x38, 0x73, 0x46, 0xee, 0x9e, 0xbf, 0xc2, 0xdc, 0xfd, - 0x18, 0x0a, 0xcc, 0xd3, 0x07, 0xf5, 0xde, 0xa5, 0x7b, 0x19, 0x47, 0xfb, 0x96, 0x32, 0x21, 0xd1, - 0x27, 0x39, 0xda, 0xb7, 0x30, 0xe7, 0xc7, 0x4f, 0xc4, 0x90, 0xb0, 0x41, 0xd0, 0xcd, 0xb6, 0x9c, - 0x1f, 0x0b, 0x28, 0x56, 0xd8, 0x4c, 0xc5, 0x54, 0xba, 0xf2, 0x8a, 0xe9, 0xeb, 0xb0, 0xc0, 0xa3, - 0x66, 0x30, 0x92, 0x09, 0x55, 0x21, 0xd1, 0xd4, 0x91, 0x04, 0x63, 0x8d, 0x47, 0x7d, 0xa8, 0x9e, - 0xd8, 0xd4, 0x75, 0xda, 0x23, 0x36, 0x50, 0x59, 0xd5, 0xe5, 0xf5, 0xd5, 0xd1, 0x1c, 0x64, 0x17, - 0x2b, 0xfe, 0xc4, 0x09, 0x6f, 0xf4, 0x87, 0xb0, 0x30, 0x20, 0x76, 0x97, 0x2b, 0xa4, 0x22, 0x14, - 0x82, 0x5f, 0x5e, 0x21, 0x86, 0x01, 0xb6, 0x1e, 0x48, 0xa6, 0xb2, 0x7c, 0x4d, 0xda, 0x42, 0x12, - 0x8a, 0xb5, 0x4c, 0x74, 0x06, 0x4b, 0xb2, 0xcc, 0x57, 0x98, 0x7a, 0x55, 0x4c, 0xe2, 0xb7, 0x2f, - 0xdf, 0xe7, 0x34, 0xb8, 0x74, 0xae, 0x9f, 0x4f, 0x1a, 0x4b, 0x26, 0x84, 0xe2, 0xb4, 0x98, 0xb5, - 0x7b, 0x50, 0x33, 0x67, 0x78, 0xa9, 0x22, 0xf0, 0x4f, 0x0a, 0x70, 0x7d, 0x6f, 0xd3, 0xd2, 0xbd, - 0xb4, 0xc3, 0xc0, 0x73, 0x9d, 0x31, 0xfa, 0x63, 0x28, 0x7b, 0xf6, 0x09, 0xf1, 0x68, 0x3d, 0x27, - 0x96, 0xf0, 0xd1, 0xcb, 0xeb, 0x71, 0x8a, 0x79, 0x6b, 0x5f, 0x70, 0x96, 0xca, 0x8c, 0xad, 0x5b, - 0x02, 0xb1, 0x12, 0x8b, 0x3e, 0x86, 0x85, 0x13, 0xdb, 0x39, 0x0d, 0x7a, 0x3d, 0xe5, 0xa5, 0x36, - 0x5f, 0xc2, 0x60, 0xc4, 0x78, 0x99, 0x7d, 0xa9, 0x0f, 0xac, 0xb9, 0x22, 0x0b, 0x6e, 0x91, 0x28, - 0x0a, 0xa2, 0x03, 0x5f, 0xa1, 0x94, 0xd5, 0x8a, 0xf3, 0x5c, 0xe9, 0xbc, 0xad, 0xe6, 0x75, 0x6b, - 0x67, 0x16, 0x11, 0x9e, 0x3d, 0x76, 0xed, 0xbb, 0xb0, 0x68, 0x2c, 0xee, 0x52, 0xfb, 0xf0, 0x93, - 0x32, 0xd4, 0xf6, 0xec, 0xde, 0xa9, 0x7d, 0x41, 0xa7, 0xf7, 0x55, 0x28, 0xb1, 0x20, 0x74, 0x1d, - 0x95, 0x5f, 0xc4, 0xf9, 0xd8, 0x11, 0x07, 0x62, 0x89, 0xe3, 0x89, 0x7f, 0x68, 0x47, 0xcc, 0x65, - 0xba, 0x1c, 0x2b, 0x25, 0x89, 0xff, 0xa1, 0x46, 0xe0, 0x84, 0x26, 0xe3, 0x54, 0x8a, 0x57, 
0xee, - 0x54, 0x36, 0xa1, 0x16, 0x91, 0x4f, 0x47, 0xae, 0xe8, 0x4a, 0x9e, 0x52, 0x91, 0x40, 0x94, 0x92, - 0x1b, 0x45, 0x6c, 0xe0, 0x70, 0x8a, 0x92, 0xa7, 0x1d, 0x4e, 0x30, 0x0c, 0x23, 0x42, 0xa9, 0xf0, - 0x47, 0x95, 0x24, 0xed, 0xd8, 0x52, 0x70, 0x1c, 0x53, 0xf0, 0x34, 0xad, 0xe7, 0x8d, 0xe8, 0x60, - 0x97, 0xf3, 0xe0, 0x65, 0x86, 0x70, 0x4b, 0xa5, 0x24, 0x4d, 0xdb, 0x4d, 0x61, 0x71, 0x86, 0x5a, - 0xfb, 0xfe, 0xca, 0x2b, 0xf6, 0xfd, 0x46, 0x24, 0xab, 0x5e, 0x61, 0x24, 0x6b, 0xc3, 0x4a, 0x6c, - 0x02, 0xae, 0xdf, 0xdf, 0x23, 0x63, 0x95, 0xb4, 0xc4, 0x25, 0xe6, 0x61, 0x1a, 0x8d, 0xb3, 0xf4, - 0x3c, 0x1a, 0xe8, 0xb2, 0x7f, 0x31, 0x5d, 0x5e, 0xeb, 0x92, 0x5f, 0xe3, 0xd1, 0xef, 0x40, 0x91, - 0xda, 0xd4, 0xab, 0xd7, 0x5e, 0xf6, 0x12, 0xa8, 0x6d, 0xed, 0x2b, 0xed, 0x89, 0xc4, 0x81, 0x7f, - 0x63, 0xc1, 0xb2, 0x79, 0x00, 0xb0, 0x1f, 0xf4, 0xf5, 0x09, 0x6a, 0xc3, 0x8a, 0xeb, 0x33, 0x12, - 0x9d, 0xd9, 0x9e, 0x45, 0x9c, 0xc0, 0xef, 0x52, 0x71, 0x9a, 0x8a, 0xc9, 0xb2, 0x1e, 0xa6, 0xd1, - 0x38, 0x4b, 0xdf, 0xfc, 0xbb, 0x02, 0x2c, 0x3e, 0x69, 0x1f, 0x59, 0x17, 0x3c, 0x94, 0x46, 0x93, - 0x21, 0xff, 0x82, 0x26, 0x83, 0xb1, 0xd5, 0x85, 0xd7, 0xd6, 0x6a, 0xbf, 0xfa, 0x03, 0xae, 0x0e, - 0x4e, 0xe9, 0xd5, 0x1e, 0x9c, 0xe6, 0x8f, 0x8b, 0xb0, 0x7a, 0x10, 0x12, 0xff, 0xa3, 0x81, 0x4b, - 0x4f, 0x8d, 0x2b, 0x9f, 0x41, 0x40, 0x59, 0x36, 0x0d, 0x7d, 0x10, 0x50, 0x86, 0x05, 0xc6, 0xb4, - 0xda, 0xfc, 0x0b, 0xac, 0x76, 0x03, 0xaa, 0x3c, 0x73, 0xa5, 0xa1, 0xed, 0x4c, 0xf5, 0x50, 0x9e, - 0x68, 0x04, 0x4e, 0x68, 0xc4, 0x13, 0x87, 0x11, 0x1b, 0x1c, 0x05, 0xa7, 0xc4, 0xbf, 0x5c, 0x85, - 0x25, 0x9f, 0x38, 0xe8, 0xb1, 0x38, 0x61, 0x83, 0xde, 0x03, 0xb0, 0x93, 0xe7, 0x16, 0xb2, 0xba, - 0x8a, 0x35, 0xde, 0x4e, 0x1e, 0x5b, 0x18, 0x54, 0xa6, 0xa1, 0x95, 0x5f, 0x9b, 0xa1, 0x2d, 0x5c, - 0xf9, 0x9d, 0x0e, 0x86, 0x9a, 0xd9, 0x11, 0xb8, 0x40, 0x77, 0x5b, 0x57, 0x2d, 0xf9, 0xe7, 0x55, - 0x2d, 0xcd, 0x9f, 0xe4, 0xa1, 0x6c, 0x89, 0x89, 0xa1, 0x1f, 0x40, 0x65, 0x48, 0x98, 0x2d, 0x1a, - 0x38, 0xb2, 0x2e, 0x7b, 0xe7, 0x62, 0x7d, 0xc2, 0x03, 0x71, 0xfc, 0x1f, 0x13, 0x66, 0x27, 0x4b, - 0x48, 0x60, 0x38, 0xe6, 0x8a, 0x7a, 0xea, 0x1a, 0x26, 0x3f, 0x6f, 0xc7, 0x4b, 0xce, 0xd8, 0x0a, - 0x89, 0x33, 0xf3, 0xe6, 0xc5, 0x87, 0x32, 0x65, 0x36, 0x1b, 0xd1, 0xf9, 0xaf, 0xc6, 0x95, 0x24, - 0xc1, 0xcd, 0x68, 0x12, 0x8b, 0x6f, 0xac, 0xa4, 0x34, 0xff, 0x35, 0x07, 0x20, 0x09, 0xf7, 0x5d, - 0xca, 0xd0, 0xef, 0x4f, 0x29, 0xb2, 0x75, 0x31, 0x45, 0xf2, 0xd1, 0x42, 0x8d, 0x71, 0x9c, 0xd7, - 0x10, 0x43, 0x89, 0x04, 0x4a, 0x2e, 0x23, 0x43, 0xdd, 0x5e, 0x7a, 0x7f, 0xde, 0xb5, 0x25, 0x79, - 0xd6, 0x43, 0xce, 0x16, 0x4b, 0xee, 0xcd, 0xff, 0x2c, 0xe9, 0x35, 0x71, 0xc5, 0xa2, 0x1f, 0xe5, - 0x32, 0x97, 0x06, 0x32, 0x89, 0x7e, 0xf8, 0xca, 0x9a, 0xa8, 0x49, 0x46, 0xf4, 0xfc, 0x3b, 0x08, - 0x14, 0x40, 0x85, 0xc9, 0x53, 0xa3, 0x97, 0xdf, 0x9e, 0xfb, 0xfc, 0x19, 0xf7, 0x2b, 0x8a, 0x35, - 0x8e, 0x85, 0x20, 0xcf, 0xb8, 0x8d, 0x99, 0xbb, 0x7d, 0xa5, 0xef, 0x6f, 0x64, 0xdf, 0x61, 0xfa, - 0x36, 0x07, 0xfd, 0x38, 0x07, 0xab, 0xdd, 0xf4, 0x75, 0x8e, 0x0e, 0x68, 0x73, 0x28, 0x3a, 0x73, - 0x41, 0x14, 0x5f, 0x72, 0xad, 0x66, 0x10, 0x14, 0x4f, 0x09, 0x47, 0x8f, 0x00, 0xa9, 0xb2, 0x60, - 0xd7, 0x76, 0x3d, 0xd2, 0xc5, 0xc1, 0xc8, 0xef, 0x0a, 0x2f, 0x5d, 0x49, 0x2e, 0x61, 0x77, 0xa6, - 0x28, 0xf0, 0x8c, 0x51, 0x3c, 0x11, 0x16, 0x53, 0xed, 0x8c, 0xa8, 0xf0, 0xf5, 0xe5, 0xf4, 0xd3, - 0xba, 0x1d, 0x03, 0x87, 0x53, 0x94, 0xe8, 0x2e, 0x2c, 0x38, 0x6e, 0xe4, 0x8c, 0x5c, 0xa6, 0xba, - 0x65, 0x6f, 0xaa, 0x41, 0xd7, 0x8d, 0xdb, 0x44, 0x49, 0x80, 0x35, 0x25, 0xba, 0x03, 0x95, 0x88, - 0x84, 0x9e, 0xeb, 
0xd8, 0x32, 0xa9, 0x2d, 0xe9, 0x17, 0x16, 0x12, 0x86, 0x63, 0x6c, 0x33, 0x80, - 0x9a, 0x79, 0xcc, 0xd1, 0xc7, 0xb1, 0xfb, 0x90, 0xa7, 0xf7, 0x3b, 0x97, 0xcf, 0xe7, 0xfe, 0x7f, - 0x7f, 0xf1, 0x8f, 0x79, 0xa8, 0x59, 0x9e, 0xed, 0xc4, 0x61, 0x3d, 0x1d, 0x59, 0x72, 0xaf, 0x21, - 0x85, 0x01, 0x2a, 0xe6, 0x23, 0x22, 0x7b, 0xfe, 0xd2, 0x57, 0xf8, 0x56, 0x3c, 0x18, 0x1b, 0x8c, - 0x78, 0x2e, 0xe2, 0x0c, 0x6c, 0xdf, 0x27, 0x9e, 0x4a, 0x2f, 0xe2, 0xd8, 0xba, 0x25, 0xc1, 0x58, - 0xe3, 0x39, 0xe9, 0x90, 0x50, 0x6a, 0xf7, 0xf5, 0x9d, 0x59, 0x4c, 0xfa, 0x58, 0x82, 0xb1, 0xc6, - 0x37, 0xff, 0xb7, 0x08, 0xc8, 0x62, 0xb6, 0xdf, 0xb5, 0xa3, 0xee, 0xde, 0x66, 0x9c, 0xc7, 0x3e, - 0xf7, 0xa9, 0x58, 0xee, 0x75, 0x3c, 0x15, 0x33, 0xde, 0xfc, 0xe5, 0xaf, 0xe4, 0xcd, 0xdf, 0x13, - 0xf3, 0xcd, 0x9f, 0xd4, 0xf6, 0x3b, 0xb3, 0xde, 0xfc, 0xfd, 0xca, 0xde, 0xe8, 0x84, 0x44, 0x3e, - 0x61, 0x84, 0xea, 0xb9, 0x5e, 0xe0, 0xe5, 0xdf, 0xd5, 0x67, 0xd5, 0x3d, 0x58, 0x0a, 0x6d, 0xe6, - 0x0c, 0x2c, 0x16, 0xd9, 0x8c, 0xf4, 0xc7, 0x2a, 0x35, 0x7c, 0x5f, 0x0d, 0x5b, 0x3a, 0x34, 0x91, - 0xcf, 0x26, 0x8d, 0x5f, 0x7b, 0xde, 0x2b, 0x60, 0x36, 0x0e, 0x09, 0x6d, 0x09, 0x72, 0x71, 0x8d, - 0x9a, 0x66, 0xcb, 0xf3, 0x4f, 0xcf, 0x3d, 0x23, 0x07, 0xc9, 0x3d, 0x6a, 0x25, 0x99, 0xdb, 0x7e, - 0x8c, 0xc1, 0x06, 0x55, 0x73, 0x03, 0x6a, 0xf2, 0x44, 0xab, 0xd6, 0x52, 0x03, 0x4a, 0xb6, 0xe7, - 0x05, 0x4f, 0xc5, 0xc9, 0x2d, 0xc9, 0xdb, 0x89, 0x36, 0x07, 0x60, 0x09, 0x6f, 0xfe, 0x69, 0x05, - 0x62, 0x7f, 0x8f, 0x9c, 0xa9, 0xf4, 0xe0, 0xf2, 0xaf, 0xc6, 0x1e, 0x2b, 0x06, 0xd2, 0xa7, 0xe9, - 0x2f, 0x23, 0x4b, 0x50, 0xaf, 0x67, 0x5c, 0x87, 0xb4, 0x1d, 0x27, 0x18, 0xa9, 0x8b, 0xd2, 0xfc, - 0xf4, 0xeb, 0x99, 0x34, 0x05, 0x9e, 0x31, 0x0a, 0x3d, 0x12, 0xef, 0xf3, 0x98, 0xcd, 0x75, 0xaa, - 0xa2, 0xe0, 0xdb, 0xcf, 0x79, 0x9f, 0x27, 0x89, 0xe2, 0x47, 0x79, 0xf2, 0x13, 0x27, 0xc3, 0xd1, - 0x0e, 0x2c, 0x9c, 0x05, 0xde, 0x68, 0x48, 0xb4, 0x4d, 0xad, 0xcd, 0xe2, 0xf4, 0xa1, 0x20, 0x31, - 0x4a, 0x17, 0x39, 0x04, 0xeb, 0xb1, 0x88, 0xc0, 0x8a, 0xe8, 0x17, 0xba, 0x6c, 0xac, 0x2e, 0x21, - 0x55, 0xfd, 0xf5, 0xb5, 0x59, 0xec, 0x0e, 0x83, 0xae, 0x95, 0xa6, 0x56, 0x8f, 0xc7, 0xd2, 0x40, - 0x9c, 0xe5, 0x89, 0xfe, 0x22, 0x07, 0x35, 0x3f, 0xe8, 0x12, 0xed, 0xed, 0x54, 0xb9, 0x71, 0x34, - 0x7f, 0x0e, 0xd0, 0x7a, 0x62, 0xb0, 0x95, 0x7d, 0xc3, 0x38, 0x12, 0x9a, 0x28, 0x9c, 0x92, 0x8f, - 0x8e, 0x61, 0x91, 0x05, 0x9e, 0x3a, 0xa3, 0xba, 0x06, 0x59, 0x9f, 0xb5, 0xe6, 0xa3, 0x98, 0x2c, - 0x79, 0xae, 0x90, 0xc0, 0x28, 0x36, 0xf9, 0x20, 0x1f, 0x56, 0xdd, 0xa1, 0xdd, 0x27, 0x87, 0x23, - 0xcf, 0x93, 0x2e, 0x5e, 0x77, 0x9b, 0x67, 0x3e, 0xc4, 0xe4, 0x8e, 0xc8, 0x53, 0xe7, 0x82, 0xf4, - 0x48, 0x44, 0x7c, 0x87, 0x24, 0x69, 0xc5, 0xc3, 0x0c, 0x27, 0x3c, 0xc5, 0x1b, 0xdd, 0x87, 0xeb, - 0x61, 0xe4, 0x06, 0x42, 0xd5, 0x9e, 0x4d, 0x65, 0x3e, 0x50, 0x4d, 0x87, 0xf6, 0xc3, 0x2c, 0x01, - 0x9e, 0x1e, 0xc3, 0x83, 0xbc, 0x06, 0x8a, 0xfe, 0x8e, 0x0a, 0xf2, 0x7a, 0x2c, 0x8e, 0xb1, 0x68, - 0x17, 0x2a, 0x76, 0xaf, 0xe7, 0xfa, 0x9c, 0x72, 0x51, 0x98, 0xca, 0x5b, 0xb3, 0x96, 0xd6, 0x56, - 0x34, 0x92, 0x8f, 0xfe, 0xc2, 0xf1, 0xd8, 0xb5, 0xef, 0xc3, 0xf5, 0xa9, 0xad, 0xbb, 0x54, 0x57, - 0xd4, 0x02, 0x48, 0x2e, 0xec, 0xd1, 0x57, 0xa1, 0x44, 0x99, 0x1d, 0xe9, 0x8a, 0x3e, 0xce, 0xc5, - 0x2d, 0x0e, 0xc4, 0x12, 0xc7, 0xcb, 0x38, 0xca, 0x82, 0x30, 0x5b, 0xc6, 0x59, 0x2c, 0x08, 0xb1, - 0xc0, 0x34, 0x27, 0x05, 0x58, 0xd0, 0x81, 0x90, 0x1a, 0x39, 0x6b, 0x6e, 0xde, 0xbb, 0x41, 0xc5, - 0xf4, 0x85, 0xa9, 0x6b, 0x3a, 0x5c, 0xe4, 0xaf, 0x3c, 0x5c, 0x9c, 0x42, 0x39, 0x14, 0xce, 0x58, - 0x39, 0xa8, 0xfb, 0xf3, 0xcb, 0x16, 0xec, 
0x64, 0xac, 0x95, 0xbf, 0xb1, 0x12, 0x81, 0x3e, 0x85, - 0xa5, 0x88, 0xb0, 0x68, 0x1c, 0xc7, 0xa6, 0xe2, 0x9c, 0xfd, 0x7c, 0x71, 0x1f, 0x82, 0x4d, 0x96, - 0x38, 0x2d, 0xa1, 0xf9, 0x8b, 0x1c, 0xac, 0x66, 0x95, 0x82, 0x4e, 0xa1, 0x40, 0x23, 0x47, 0x6d, - 0xf2, 0xe1, 0xab, 0xd3, 0xb6, 0x4c, 0x2d, 0x64, 0x3f, 0xca, 0x8a, 0x1c, 0xcc, 0xa5, 0x70, 0x23, - 0xec, 0x12, 0xca, 0xb2, 0x46, 0xb8, 0x4d, 0x28, 0xc3, 0x02, 0x83, 0xf6, 0xa7, 0x53, 0x90, 0xd6, - 0xac, 0x14, 0xe4, 0xcd, 0xac, 0xbc, 0x59, 0x09, 0x48, 0xf3, 0xdf, 0xf2, 0xf0, 0xc6, 0xec, 0x89, - 0xa1, 0xef, 0xc1, 0x72, 0x52, 0xa9, 0x18, 0x7f, 0xd3, 0x89, 0x5b, 0xdd, 0xdb, 0x29, 0x2c, 0xce, - 0x50, 0xf3, 0x98, 0xaf, 0x5e, 0xac, 0xe8, 0xff, 0xea, 0x18, 0x3d, 0xa7, 0xad, 0x18, 0x83, 0x0d, - 0x2a, 0xd4, 0x86, 0x15, 0xf5, 0x75, 0x64, 0x16, 0x84, 0x46, 0x43, 0x79, 0x2b, 0x8d, 0xc6, 0x59, - 0x7a, 0x9e, 0xe3, 0xf2, 0xd8, 0xac, 0x1f, 0x3a, 0x1b, 0x39, 0xee, 0xb6, 0x04, 0x63, 0x8d, 0xe7, - 0xb5, 0x12, 0xff, 0x79, 0x94, 0x7e, 0x09, 0x98, 0x94, 0xc8, 0x06, 0x0e, 0xa7, 0x28, 0x93, 0x27, - 0x8a, 0xb2, 0xbc, 0x9a, 0x7a, 0xa2, 0xd8, 0xfc, 0x79, 0x0e, 0x96, 0x52, 0x26, 0x8e, 0x7a, 0x50, - 0x38, 0xdd, 0xd4, 0xc5, 0xce, 0xde, 0x2b, 0xbc, 0x16, 0x93, 0x16, 0xb4, 0xb7, 0x49, 0x31, 0x17, - 0x80, 0x3e, 0x89, 0xeb, 0xaa, 0xb9, 0xdf, 0x22, 0x99, 0xe9, 0x97, 0x4a, 0x87, 0xd3, 0x25, 0xd6, - 0x4e, 0xbc, 0x48, 0xeb, 0xa9, 0xcb, 0x9c, 0x01, 0x7a, 0x13, 0x0a, 0xb6, 0x3f, 0x16, 0x19, 0x5a, - 0x55, 0xce, 0xab, 0xed, 0x8f, 0x31, 0x87, 0x09, 0x94, 0xe7, 0xa9, 0x8b, 0x7b, 0x89, 0xf2, 0x3c, - 0xcc, 0x61, 0xcd, 0x7f, 0x5e, 0x84, 0x95, 0x8c, 0x0b, 0xbc, 0xc0, 0x53, 0x00, 0x69, 0x5f, 0x5d, - 0x57, 0x06, 0xe9, 0x69, 0xfb, 0x52, 0x18, 0x6c, 0x50, 0xa1, 0xbe, 0xdc, 0x04, 0xe9, 0xbd, 0xf6, - 0xe7, 0xd2, 0x4c, 0xa6, 0x32, 0xca, 0xec, 0xc2, 0x8f, 0x72, 0x50, 0xb3, 0x8d, 0xff, 0xfd, 0x28, - 0xe7, 0xf5, 0x78, 0x9e, 0xfa, 0x64, 0xea, 0x2f, 0x4f, 0xf2, 0x49, 0x8d, 0x89, 0xc0, 0x29, 0xa1, - 0xc8, 0x81, 0xe2, 0x80, 0x31, 0xfd, 0x77, 0x8f, 0x9d, 0x57, 0x72, 0xa7, 0x2d, 0xef, 0x4e, 0x38, - 0x00, 0x0b, 0xe6, 0xe8, 0x29, 0x54, 0xed, 0xa7, 0x54, 0xfe, 0xc1, 0x4f, 0xfd, 0x0f, 0x64, 0x9e, - 0x32, 0x2c, 0xf3, 0x5f, 0x41, 0xd5, 0xd4, 0xd6, 0x50, 0x9c, 0xc8, 0x42, 0x11, 0x94, 0x1d, 0xf1, - 0x52, 0x5c, 0x3d, 0x0d, 0xb8, 0xff, 0x8a, 0x5e, 0x9c, 0xcb, 0x40, 0x91, 0x02, 0x61, 0x25, 0x09, - 0xf5, 0xa1, 0x74, 0x6a, 0xf7, 0x4e, 0x6d, 0x75, 0x83, 0x37, 0xc7, 0xe1, 0x32, 0xef, 0x6c, 0xa5, - 0x03, 0x11, 0x10, 0x2c, 0xf9, 0xf3, 0xad, 0xf3, 0x6d, 0x46, 0xd5, 0xbb, 0xa3, 0x39, 0xb6, 0xce, - 0xb8, 0x85, 0x92, 0x5b, 0xc7, 0x01, 0x58, 0x30, 0xe7, 0xab, 0x11, 0x8d, 0x04, 0x91, 0xd5, 0xcd, - 0xe7, 0x2a, 0x8c, 0x46, 0x8b, 0x5c, 0x8d, 0x80, 0x60, 0xc9, 0x9f, 0xdb, 0x48, 0xa0, 0x6f, 0x59, - 0x54, 0x62, 0x38, 0x87, 0x8d, 0x64, 0x2f, 0x6c, 0xa4, 0x8d, 0xc4, 0x50, 0x9c, 0xc8, 0x42, 0x1f, - 0x43, 0xc1, 0x0b, 0xfa, 0xf5, 0xa5, 0x79, 0x7b, 0xe1, 0xc9, 0xed, 0xa0, 0x3c, 0xe8, 0xfb, 0x41, - 0x1f, 0x73, 0xce, 0x68, 0x04, 0x65, 0x2a, 0x7c, 0x9f, 0xba, 0x96, 0x9c, 0x3f, 0x25, 0x92, 0xae, - 0xb4, 0x73, 0x53, 0x35, 0x0f, 0xf5, 0x9b, 0x2f, 0x01, 0xc5, 0x4a, 0x18, 0xfa, 0xb3, 0x1c, 0x2c, - 0xdb, 0xa9, 0xff, 0x2b, 0xd5, 0x97, 0xe7, 0x7d, 0x70, 0x3b, 0xf3, 0xff, 0x4f, 0xf2, 0x2f, 0x99, - 0x69, 0x14, 0xce, 0x88, 0x6e, 0x3a, 0xb0, 0x68, 0xfc, 0xf1, 0xec, 0x02, 0x57, 0x26, 0xef, 0x01, - 0x9c, 0x91, 0xc8, 0xed, 0x8d, 0xb7, 0x48, 0xc4, 0xd4, 0x3f, 0x5f, 0x62, 0xdf, 0xfd, 0x61, 0x8c, - 0xc1, 0x06, 0x55, 0xa7, 0xf5, 0xf9, 0x17, 0xeb, 0xd7, 0x7e, 0xfa, 0xc5, 0xfa, 0xb5, 0x9f, 0x7d, - 0xb1, 0x7e, 0xed, 0x87, 0xe7, 0xeb, 0xb9, 0xcf, 0xcf, 0xd7, 0x73, 
0x3f, 0x3d, 0x5f, 0xcf, 0xfd, - 0xec, 0x7c, 0x3d, 0xf7, 0x1f, 0xe7, 0xeb, 0xb9, 0xbf, 0xfc, 0xf9, 0xfa, 0xb5, 0xdf, 0xad, 0xe8, - 0xe5, 0xfc, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x12, 0x7f, 0xde, 0x00, 0x31, 0x3e, 0x00, 0x00, + // 4961 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x3c, 0x4d, 0x8f, 0x63, 0xc7, + 0x71, 0x4b, 0x0e, 0x39, 0x43, 0xd6, 0x70, 0x3e, 0xb6, 0x57, 0xbb, 0xa6, 0xc6, 0xd6, 0x72, 0x43, + 0x23, 0xca, 0xda, 0x90, 0x39, 0xd2, 0x2a, 0x8e, 0xc7, 0x0a, 0x6c, 0x8b, 0xf3, 0xa5, 0xfd, 0xe0, + 0xee, 0x8c, 0x8a, 0x1c, 0x09, 0xf9, 0x70, 0xa4, 0x37, 0x8f, 0x4d, 0xf2, 0x69, 0x1e, 0xdf, 0xe3, + 0x76, 0x37, 0x67, 0x45, 0x01, 0x4e, 0xec, 0x18, 0x49, 0x60, 0x04, 0x90, 0x73, 0xf0, 0x21, 0x27, + 0x23, 0x40, 0x90, 0x43, 0x80, 0x1c, 0x02, 0xe4, 0x1f, 0x38, 0x40, 0xa0, 0xa3, 0x73, 0xf3, 0x21, + 0x18, 0x44, 0xe3, 0x5c, 0x72, 0x30, 0x12, 0x9f, 0x12, 0xec, 0x25, 0x41, 0x7f, 0xbc, 0x4f, 0x72, + 0xb4, 0xc3, 0xe5, 0x6a, 0x64, 0xc0, 0x37, 0xbe, 0xaa, 0xea, 0xaa, 0xee, 0x7a, 0xd5, 0xd5, 0x55, + 0xd5, 0xf5, 0x08, 0xb7, 0xbb, 0x8e, 0xe8, 0x0d, 0x0f, 0x6b, 0xb6, 0xdf, 0x5f, 0xb7, 0x58, 0xd7, + 0x1f, 0x30, 0xff, 0x3d, 0xf5, 0xe3, 0x2b, 0xf4, 0x98, 0x7a, 0x82, 0xaf, 0x0f, 0x8e, 0xba, 0xeb, + 0xd6, 0xc0, 0xe1, 0xeb, 0x9c, 0x7a, 0xdc, 0x67, 0xeb, 0xc7, 0xaf, 0x58, 0xee, 0xa0, 0x67, 0xbd, + 0xb2, 0xde, 0xa5, 0x1e, 0x65, 0x96, 0xa0, 0xed, 0xda, 0x80, 0xf9, 0xc2, 0x27, 0x1b, 0x11, 0xa7, + 0x5a, 0xc0, 0x49, 0xfd, 0x78, 0x47, 0x73, 0xaa, 0x0d, 0x8e, 0xba, 0x35, 0xc9, 0xa9, 0xa6, 0x39, + 0xd5, 0x02, 0x4e, 0x6b, 0xdf, 0x3a, 0xf7, 0x1c, 0x6c, 0xbf, 0xdf, 0xf7, 0xbd, 0xb4, 0xe8, 0xb5, + 0xaf, 0xc4, 0x18, 0x74, 0xfd, 0xae, 0xbf, 0xae, 0xc0, 0x87, 0xc3, 0x8e, 0x7a, 0x52, 0x0f, 0xea, + 0x97, 0x21, 0xaf, 0x1e, 0x6d, 0xf0, 0x9a, 0xe3, 0x4b, 0x96, 0xeb, 0xb6, 0xcf, 0xe8, 0xfa, 0xf1, + 0xd8, 0x6a, 0xd6, 0x7e, 0x3b, 0xa2, 0xe9, 0x5b, 0x76, 0xcf, 0xf1, 0x28, 0x1b, 0x45, 0xf3, 0xe8, + 0x53, 0x61, 0x4d, 0x1a, 0xb5, 0x7e, 0xd6, 0x28, 0x36, 0xf4, 0x84, 0xd3, 0xa7, 0x63, 0x03, 0x7e, + 0xe7, 0x49, 0x03, 0xb8, 0xdd, 0xa3, 0x7d, 0x2b, 0x3d, 0xae, 0xfa, 0x38, 0x07, 0xab, 0xf5, 0xb7, + 0x9b, 0x0d, 0xab, 0x7f, 0xd8, 0xb6, 0x5a, 0xcc, 0xe9, 0x76, 0x29, 0x23, 0x1b, 0x50, 0xea, 0x0c, + 0x3d, 0x5b, 0x38, 0xbe, 0xf7, 0xc0, 0xea, 0xd3, 0x72, 0xe6, 0x46, 0xe6, 0x66, 0x71, 0xf3, 0xb9, + 0x8f, 0x4e, 0x2a, 0x97, 0x4e, 0x4f, 0x2a, 0xa5, 0xdd, 0x18, 0x0e, 0x13, 0x94, 0x04, 0xa1, 0x68, + 0xd9, 0x36, 0xe5, 0xfc, 0x1e, 0x1d, 0x95, 0xb3, 0x37, 0x32, 0x37, 0x17, 0x6f, 0xfd, 0x66, 0x4d, + 0x4f, 0x4d, 0xbe, 0xb2, 0x9a, 0xd4, 0x52, 0xed, 0xf8, 0x95, 0x5a, 0x93, 0xda, 0x8c, 0x8a, 0x7b, + 0x74, 0xd4, 0xa4, 0x2e, 0xb5, 0x85, 0xcf, 0x36, 0x97, 0x4e, 0x4f, 0x2a, 0xc5, 0x7a, 0x30, 0x16, + 0x23, 0x36, 0x92, 0x27, 0x0f, 0xc8, 0xcb, 0x73, 0x53, 0xf3, 0x0c, 0xc1, 0x18, 0xb1, 0x21, 0x2f, + 0xc2, 0x3c, 0xa3, 0x5d, 0xc7, 0xf7, 0xca, 0x39, 0xb5, 0xb6, 0x65, 0xb3, 0xb6, 0x79, 0x54, 0x50, + 0x34, 0x58, 0x32, 0x84, 0x85, 0x81, 0x35, 0x72, 0x7d, 0xab, 0x5d, 0xce, 0xdf, 0x98, 0xbb, 0xb9, + 0x78, 0xeb, 0x6e, 0xed, 0x69, 0xad, 0xb3, 0x66, 0xb4, 0xbb, 0x6f, 0x31, 0xab, 0x4f, 0x05, 0x65, + 0x9b, 0x2b, 0x46, 0xe8, 0xc2, 0xbe, 0x16, 0x81, 0x81, 0x2c, 0xf2, 0xc7, 0x00, 0x83, 0x80, 0x8c, + 0x97, 0xe7, 0x9f, 0xb9, 0x64, 0x62, 0x24, 0x43, 0x08, 0xe2, 0x18, 0x93, 0x48, 0x5e, 0x83, 0x65, + 0xc7, 0x3b, 0xf6, 0x6d, 0x4b, 0xbe, 0xd8, 0xd6, 0x68, 0x40, 0xcb, 0x0b, 0x4a, 0x4d, 0xe4, 0xf4, + 0xa4, 0xb2, 0x7c, 0x27, 0x81, 0xc1, 0x14, 0x25, 0xf9, 0x12, 0x2c, 0x30, 0xdf, 0xa5, 0x75, 0x7c, + 0x50, 0x2e, 0xa8, 0x41, 0xe1, 0x32, 0x51, 
0x83, 0x31, 0xc0, 0x57, 0x7f, 0x91, 0x85, 0x2b, 0x75, + 0xd6, 0xf5, 0xdf, 0xf6, 0xd9, 0x51, 0xc7, 0xf5, 0x1f, 0x05, 0xf6, 0xe7, 0xc1, 0x3c, 0xf7, 0x87, + 0xcc, 0xd6, 0x96, 0x37, 0xd3, 0xd2, 0xeb, 0x4c, 0x38, 0x1d, 0xcb, 0x16, 0x0d, 0x33, 0xc5, 0x4d, + 0x90, 0x6f, 0xb9, 0xa9, 0xb8, 0xa3, 0x91, 0x42, 0x6e, 0x43, 0xd1, 0x1f, 0xc8, 0x6d, 0x21, 0x0d, + 0x22, 0xab, 0x26, 0xfd, 0x65, 0x33, 0xe9, 0xe2, 0x5e, 0x80, 0x78, 0x7c, 0x52, 0xb9, 0x1a, 0x9f, + 0x6c, 0x88, 0xc0, 0x68, 0x70, 0xea, 0xc5, 0xcd, 0x5d, 0xf8, 0x8b, 0xfb, 0x02, 0xe4, 0x2c, 0xd6, + 0xe5, 0xe5, 0xdc, 0x8d, 0xb9, 0x9b, 0xc5, 0xcd, 0xc2, 0xe9, 0x49, 0x25, 0x57, 0x67, 0x5d, 0x8e, + 0x0a, 0x5a, 0xfd, 0xa5, 0xdc, 0xec, 0x29, 0x85, 0x90, 0x26, 0x64, 0xf9, 0xab, 0x46, 0xd1, 0xbf, + 0x7b, 0xfe, 0xa9, 0x6a, 0x0f, 0x5a, 0x6b, 0xbe, 0x1a, 0x30, 0xdc, 0x9c, 0x3f, 0x3d, 0xa9, 0x64, + 0x9b, 0xaf, 0x62, 0x96, 0xbf, 0x4a, 0xaa, 0x30, 0xef, 0x78, 0xae, 0xe3, 0x51, 0xa3, 0x4e, 0xa5, + 0xf5, 0x3b, 0x0a, 0x82, 0x06, 0x43, 0xda, 0x90, 0xeb, 0x38, 0x2e, 0x35, 0x5b, 0x7a, 0xf7, 0xe9, + 0xb5, 0xb4, 0xeb, 0xb8, 0x34, 0x9c, 0x85, 0x5a, 0xb3, 0x84, 0xa0, 0xe2, 0x4e, 0xde, 0x85, 0xb9, + 0x21, 0x73, 0xd5, 0x36, 0x5f, 0xbc, 0xb5, 0xf3, 0xf4, 0x42, 0x0e, 0xb0, 0x11, 0xca, 0x58, 0x38, + 0x3d, 0xa9, 0xcc, 0x1d, 0x60, 0x03, 0x25, 0x6b, 0x72, 0x00, 0x45, 0xdb, 0xf7, 0x3a, 0x4e, 0xb7, + 0x6f, 0x0d, 0xca, 0x79, 0x25, 0xe7, 0xe6, 0x24, 0xff, 0xb4, 0xa5, 0x88, 0xee, 0x5b, 0x83, 0x31, + 0x17, 0xb5, 0x15, 0x0c, 0xc7, 0x88, 0x93, 0x9c, 0x78, 0xd7, 0x11, 0xe5, 0xf9, 0x59, 0x27, 0xfe, + 0x86, 0x23, 0x92, 0x13, 0x7f, 0xc3, 0x11, 0x28, 0x59, 0x13, 0x1b, 0x0a, 0x8c, 0x9a, 0x8d, 0xb6, + 0xa0, 0xc4, 0x7c, 0x7d, 0xea, 0xf7, 0x8f, 0x86, 0xc1, 0x66, 0xe9, 0xf4, 0xa4, 0x52, 0x08, 0x9e, + 0x30, 0x64, 0x5c, 0xfd, 0xa7, 0x1c, 0x5c, 0xad, 0x7f, 0x30, 0x64, 0x74, 0x47, 0x32, 0xb8, 0x3d, + 0x3c, 0xe4, 0xc1, 0x2e, 0xbf, 0x01, 0xb9, 0xce, 0xc3, 0xb6, 0x67, 0x4e, 0x97, 0x92, 0xb1, 0xec, + 0xdc, 0xee, 0x9b, 0xdb, 0x0f, 0x50, 0x61, 0xa4, 0x2b, 0xe9, 0x0d, 0x0f, 0xd5, 0x11, 0x94, 0x4d, + 0xba, 0x92, 0xdb, 0x1a, 0x8c, 0x01, 0x9e, 0x0c, 0xe0, 0x0a, 0xef, 0x59, 0x8c, 0xb6, 0xc3, 0x23, + 0x44, 0x0d, 0x9b, 0xea, 0xb8, 0xf8, 0xdc, 0xe9, 0x49, 0xe5, 0x4a, 0x73, 0x9c, 0x0b, 0x4e, 0x62, + 0x4d, 0xda, 0xb0, 0x92, 0x02, 0x1b, 0x23, 0x3b, 0xa7, 0xb4, 0x2b, 0xa7, 0x27, 0x95, 0x95, 0x94, + 0x34, 0x4c, 0xb3, 0xfc, 0x35, 0x3d, 0x80, 0xaa, 0xff, 0x93, 0x83, 0x6b, 0xca, 0x6a, 0x9a, 0x94, + 0x1d, 0x3b, 0x36, 0xdd, 0x1c, 0x86, 0x66, 0xd3, 0x85, 0x55, 0xdb, 0xf7, 0x3c, 0xaa, 0x82, 0x8e, + 0xa6, 0x60, 0x8e, 0xd7, 0x35, 0xde, 0xeb, 0x9c, 0x8a, 0x7f, 0xee, 0xf4, 0xa4, 0xb2, 0xba, 0x95, + 0x62, 0x81, 0x63, 0x4c, 0xc9, 0x3a, 0x14, 0x1f, 0x0e, 0xe9, 0x90, 0xc6, 0xec, 0xef, 0x72, 0x70, + 0x2a, 0xbc, 0x19, 0x20, 0x30, 0xa2, 0x91, 0x03, 0x84, 0x3f, 0x70, 0xec, 0xd0, 0xf2, 0x62, 0x03, + 0x5a, 0x01, 0x02, 0x23, 0x1a, 0xb2, 0x0d, 0xab, 0x7c, 0x78, 0xc8, 0x6d, 0xe6, 0x0c, 0xc2, 0x58, + 0x4b, 0xc7, 0x23, 0x65, 0x33, 0x6e, 0xb5, 0x99, 0xc2, 0xe3, 0xd8, 0x08, 0x72, 0x00, 0x73, 0xc2, + 0xe5, 0xc6, 0xf3, 0xbc, 0x36, 0xf5, 0x0e, 0x6e, 0x35, 0x9a, 0xda, 0xff, 0x68, 0xef, 0xd0, 0x6a, + 0x34, 0x51, 0xf2, 0x8b, 0x5b, 0xde, 0xfc, 0x67, 0x66, 0x79, 0x0b, 0x17, 0x6e, 0x79, 0x5d, 0xb8, + 0xba, 0xe5, 0x7b, 0x6d, 0x47, 0xaa, 0x97, 0x23, 0xe5, 0x54, 0x6c, 0x8e, 0x5a, 0x4e, 0x9f, 0x4a, + 0x77, 0x65, 0x33, 0x7f, 0xcc, 0x5d, 0x6d, 0x31, 0xdf, 0x43, 0x85, 0x21, 0x2f, 0x41, 0x41, 0x86, + 0xda, 0x1f, 0xf8, 0xe1, 0xb1, 0xb7, 0x6a, 0xa8, 0x0a, 0x2d, 0x03, 0xc7, 0x90, 0xa2, 0xfa, 0x61, + 0x06, 0x3e, 0x97, 0x92, 0xb4, 0xc5, 0x1c, 0x41, 0x99, 0x63, 0x11, 
0x0e, 0xf3, 0x87, 0x4a, 0xaa, + 0xb1, 0xec, 0xbd, 0xa7, 0x57, 0xc0, 0xc4, 0xc5, 0xe8, 0xf3, 0x58, 0xff, 0x46, 0x23, 0xaa, 0xfa, + 0x8f, 0x79, 0x58, 0xda, 0x1a, 0x72, 0xe1, 0xf7, 0x83, 0xad, 0xb6, 0x2e, 0x23, 0x6f, 0x76, 0x4c, + 0xd9, 0x01, 0x36, 0xcc, 0xba, 0x43, 0x83, 0x6e, 0x06, 0x08, 0x8c, 0x68, 0x64, 0x58, 0xcd, 0xa9, + 0x3d, 0x64, 0x7a, 0xfd, 0x85, 0x28, 0xac, 0x6e, 0x2a, 0x28, 0x1a, 0x2c, 0x39, 0x00, 0xb0, 0x29, + 0x13, 0x7a, 0x6f, 0x4e, 0xe7, 0xa4, 0x97, 0xe5, 0xbb, 0xdb, 0x0a, 0x07, 0x63, 0x8c, 0x11, 0xb9, + 0x0b, 0x44, 0xcf, 0x45, 0xee, 0x8b, 0xbd, 0x63, 0xca, 0x98, 0xd3, 0x0e, 0x76, 0xd4, 0x9a, 0x99, + 0x0a, 0x69, 0x8e, 0x51, 0xe0, 0x84, 0x51, 0x84, 0x43, 0x8e, 0x0f, 0xa8, 0x6d, 0xbc, 0xee, 0x9b, + 0x33, 0xbc, 0x80, 0xb8, 0x4a, 0x6b, 0xcd, 0x01, 0xb5, 0x77, 0x3c, 0xc1, 0x46, 0x91, 0x05, 0x49, + 0x10, 0x2a, 0x61, 0x9f, 0x79, 0xdc, 0x1f, 0xdb, 0xf3, 0x0b, 0x17, 0xb7, 0xe7, 0xd7, 0xbe, 0x06, + 0xc5, 0x50, 0x2f, 0x64, 0x15, 0xe6, 0x8e, 0xe8, 0x48, 0x9b, 0x1b, 0xca, 0x9f, 0xe4, 0x39, 0xc8, + 0x1f, 0x5b, 0xee, 0xd0, 0x6c, 0x2a, 0xd4, 0x0f, 0xaf, 0x65, 0x37, 0x32, 0xd5, 0x5f, 0x64, 0x00, + 0xb6, 0x2d, 0x61, 0xed, 0x3a, 0xae, 0xd0, 0x11, 0xc5, 0xc0, 0x12, 0xbd, 0xf4, 0x16, 0xdd, 0xb7, + 0x44, 0x0f, 0x15, 0x86, 0xbc, 0x04, 0x39, 0x21, 0xd3, 0x99, 0x6c, 0xc2, 0xcb, 0xe6, 0x64, 0xe2, + 0xf2, 0xf8, 0xa4, 0x52, 0xb8, 0xdb, 0xdc, 0x7b, 0xa0, 0x92, 0x1a, 0x45, 0x45, 0x2a, 0x81, 0xe0, + 0x39, 0x15, 0x4e, 0x17, 0x4f, 0x4f, 0x2a, 0xf9, 0xb7, 0x24, 0xc0, 0xcc, 0x81, 0xbc, 0x0e, 0x60, + 0xfb, 0x7d, 0xa9, 0x40, 0xe1, 0x33, 0x63, 0x68, 0x37, 0x02, 0x1d, 0x6f, 0x85, 0x98, 0xc7, 0x89, + 0x27, 0x8c, 0x8d, 0x51, 0x3e, 0x83, 0xf6, 0x07, 0xae, 0x25, 0xa8, 0xf2, 0xe0, 0x71, 0x9f, 0x61, + 0xe0, 0x18, 0x52, 0x54, 0xff, 0x63, 0x0e, 0x4a, 0x3b, 0x7d, 0xcb, 0x71, 0x83, 0x1d, 0x9a, 0x34, + 0x98, 0xcc, 0x85, 0x1b, 0xcc, 0x4b, 0x50, 0x18, 0x72, 0xca, 0xbc, 0xe8, 0x88, 0x0c, 0xa7, 0x7f, + 0x60, 0xe0, 0x18, 0x52, 0x90, 0x3f, 0x80, 0x12, 0xef, 0x8b, 0xc1, 0xbe, 0xc5, 0xf9, 0x23, 0x9f, + 0xb5, 0xa7, 0xdb, 0xf8, 0xab, 0xa7, 0x27, 0x95, 0x52, 0xf3, 0x7e, 0x6b, 0x3f, 0x18, 0x8e, 0x09, + 0x66, 0xf2, 0xe5, 0xf7, 0x7c, 0x2e, 0xcc, 0x5b, 0x08, 0x5f, 0xfe, 0x6d, 0x9f, 0x0b, 0x54, 0x18, + 0x65, 0x1e, 0x3e, 0x13, 0x4a, 0xcf, 0xf9, 0x98, 0x79, 0xf8, 0x4c, 0xa0, 0xc2, 0x90, 0x6b, 0x90, + 0x15, 0xbe, 0xda, 0x77, 0x45, 0x9d, 0xce, 0xb4, 0x7c, 0xcc, 0x0a, 0x5f, 0x85, 0xaa, 0xcc, 0xef, + 0x9b, 0x2c, 0x38, 0x0a, 0x55, 0x99, 0xdf, 0x47, 0x85, 0x91, 0xa1, 0x2a, 0x1f, 0x1e, 0xbe, 0x47, + 0x6d, 0x91, 0xce, 0x7a, 0x9b, 0x1a, 0x8c, 0x01, 0x5e, 0x32, 0x3b, 0xf4, 0xdb, 0xa3, 0x72, 0x31, + 0xc9, 0x6c, 0xd3, 0x6f, 0x8f, 0x50, 0x61, 0xaa, 0x3f, 0xce, 0x40, 0x5e, 0x85, 0xcb, 0xa4, 0x0f, + 0x0b, 0xb6, 0xef, 0x09, 0xfa, 0xbe, 0x30, 0x27, 0xc1, 0x0c, 0x69, 0x92, 0xe2, 0xb8, 0xa5, 0xb9, + 0x6d, 0x2e, 0xca, 0xa9, 0x99, 0x07, 0x0c, 0x64, 0xc8, 0xf4, 0xb1, 0x6d, 0x09, 0x4b, 0xbd, 0xca, + 0x92, 0x4e, 0xa5, 0xe4, 0xf6, 0x42, 0x05, 0x7d, 0xad, 0xf0, 0xd7, 0x7f, 0x53, 0xb9, 0xf4, 0xdd, + 0x7f, 0xbb, 0x71, 0xa9, 0xfa, 0xcb, 0x2c, 0x94, 0xe2, 0xec, 0xc8, 0x1a, 0x64, 0x9d, 0xb6, 0xd9, + 0x77, 0x60, 0x56, 0x94, 0xbd, 0xb3, 0x8d, 0x59, 0xa7, 0xad, 0x0e, 0x05, 0x9d, 0x64, 0x64, 0x93, + 0xb5, 0x96, 0x54, 0x16, 0xfe, 0x55, 0x58, 0x94, 0x4e, 0xf0, 0x98, 0x32, 0x2e, 0xf3, 0x70, 0x1d, + 0x40, 0x5d, 0x31, 0xc4, 0x8b, 0xd2, 0x41, 0xbc, 0xa5, 0x51, 0x18, 0xa7, 0x93, 0xea, 0x54, 0x5b, + 0x3a, 0xf5, 0xde, 0x63, 0xdb, 0xb8, 0x0e, 0x2b, 0x72, 0xfe, 0x6a, 0x91, 0x9e, 0x50, 0xc4, 0x7a, + 0xab, 0x7d, 0xce, 0x10, 0xaf, 0xc8, 0x45, 0x6e, 0x69, 0xb4, 0x1a, 0x97, 0xa6, 0x8f, 0xbf, 
0xde, + 0xf9, 0x27, 0xbc, 0xde, 0x06, 0xe4, 0xe4, 0x19, 0x6f, 0x32, 0xaa, 0x2f, 0xc7, 0x8c, 0x3b, 0x2c, + 0xcc, 0x45, 0xef, 0xa8, 0x4f, 0x85, 0x25, 0xcd, 0x5d, 0x1d, 0xca, 0xd1, 0xdc, 0xe5, 0xb1, 0xac, + 0xb8, 0xc4, 0x74, 0xfe, 0x61, 0x0e, 0x56, 0x94, 0xce, 0xb7, 0xe9, 0x80, 0x7a, 0x6d, 0xea, 0xd9, + 0x23, 0xb9, 0x76, 0x2f, 0x2a, 0xd0, 0x85, 0xe3, 0x55, 0xa0, 0xa8, 0x30, 0x72, 0xed, 0xca, 0x2e, + 0xb4, 0xae, 0x63, 0xa1, 0x6c, 0xb8, 0xf6, 0x9d, 0x24, 0x1a, 0xd3, 0xf4, 0x32, 0x0a, 0x50, 0xa0, + 0x49, 0x61, 0xed, 0x4e, 0x80, 0xc0, 0x88, 0x86, 0x1c, 0xc3, 0x42, 0x47, 0x39, 0x64, 0x6e, 0x32, + 0xa2, 0xbd, 0x19, 0x8d, 0x36, 0x5a, 0xb1, 0x76, 0xf4, 0xda, 0x7a, 0xf5, 0x6f, 0x8e, 0x81, 0x30, + 0xf2, 0xbd, 0x0c, 0x14, 0x05, 0xb3, 0x3c, 0xde, 0xf1, 0x59, 0xdf, 0xc4, 0xc3, 0xad, 0x67, 0x26, + 0xba, 0x15, 0x70, 0xa6, 0x26, 0x6b, 0x0f, 0x01, 0x18, 0x49, 0x25, 0x0e, 0x5c, 0x33, 0xd3, 0x69, + 0xf8, 0x5d, 0xc7, 0xb6, 0x5c, 0x5d, 0x26, 0xf2, 0x99, 0xb1, 0x9b, 0x57, 0x8c, 0xe6, 0xae, 0xed, + 0x4e, 0xa4, 0x7a, 0x7c, 0x52, 0x59, 0x49, 0x81, 0xf0, 0x0c, 0x86, 0xd5, 0xbf, 0xcf, 0xc3, 0xd5, + 0x89, 0xea, 0x21, 0x87, 0xc6, 0x04, 0xb5, 0xcb, 0xd8, 0x9e, 0xe1, 0x3c, 0x70, 0xfa, 0xd4, 0xa8, + 0xbc, 0x90, 0x34, 0xcc, 0xb8, 0x67, 0xca, 0x5e, 0x80, 0x67, 0xea, 0x18, 0xcf, 0xa4, 0x4b, 0x6a, + 0x33, 0x2c, 0x29, 0x0a, 0x17, 0xa2, 0xfd, 0x12, 0xf9, 0x38, 0xe2, 0x40, 0x9e, 0xbe, 0x3f, 0x60, + 0xba, 0x82, 0x36, 0x93, 0xa0, 0x9d, 0xf7, 0x07, 0xcc, 0x08, 0x5a, 0x32, 0x82, 0xf2, 0x12, 0xc6, + 0x51, 0x4b, 0x20, 0xef, 0xc2, 0x15, 0x29, 0x32, 0x6d, 0x27, 0xda, 0x35, 0xd5, 0xcc, 0x90, 0x2b, + 0xdb, 0xe3, 0x24, 0x93, 0x8c, 0x64, 0x12, 0x2b, 0x29, 0x41, 0x8a, 0x9a, 0x6c, 0x89, 0xa1, 0x84, + 0x9d, 0x71, 0x92, 0x89, 0x12, 0x26, 0xb0, 0x52, 0xbe, 0x5d, 0x25, 0xa3, 0xe6, 0x68, 0x8c, 0x7c, + 0xbb, 0x82, 0xa2, 0xc1, 0x56, 0xdf, 0x85, 0xb5, 0xb3, 0xb7, 0x93, 0x3c, 0x3d, 0xde, 0x7b, 0x98, + 0x3e, 0x3d, 0xee, 0xbe, 0x89, 0xd9, 0xf7, 0x1e, 0xc6, 0x24, 0x64, 0x3f, 0x51, 0xc2, 0x8f, 0x33, + 0x00, 0x91, 0xca, 0xa5, 0x67, 0x94, 0xf3, 0x4d, 0x7b, 0x46, 0x49, 0x81, 0x0a, 0x43, 0x3c, 0x98, + 0xef, 0x38, 0xd4, 0x6d, 0xf3, 0x72, 0x56, 0xbd, 0xea, 0x19, 0xec, 0xd7, 0x04, 0xb4, 0xbb, 0x92, + 0x5d, 0x34, 0x41, 0xf5, 0xc8, 0xd1, 0x48, 0xa9, 0xbe, 0x0c, 0xa5, 0x78, 0xa1, 0xf2, 0xc9, 0xc1, + 0x6a, 0xf5, 0xcf, 0xf3, 0xb0, 0x18, 0xab, 0xde, 0x91, 0x17, 0x74, 0x29, 0x53, 0x0f, 0x58, 0x34, + 0x03, 0xa2, 0x3a, 0xe4, 0x37, 0x61, 0xd9, 0x76, 0x7d, 0x8f, 0x6e, 0x3b, 0x4c, 0x45, 0x4c, 0x23, + 0xa3, 0xb1, 0x6b, 0x86, 0x72, 0x79, 0x2b, 0x81, 0xc5, 0x14, 0x35, 0xb1, 0x21, 0x6f, 0x33, 0xda, + 0xe6, 0x26, 0x2c, 0xdb, 0x9c, 0xa9, 0xe4, 0xb8, 0x25, 0x39, 0xe9, 0x88, 0x59, 0xfd, 0x44, 0xcd, + 0x5b, 0x85, 0x80, 0xbc, 0xa7, 0xe2, 0x3a, 0x95, 0xfb, 0xe5, 0xa6, 0x0f, 0x01, 0x9b, 0xb7, 0xc3, + 0xe1, 0x98, 0x60, 0x26, 0xa3, 0xd1, 0x8e, 0xe3, 0x52, 0xa9, 0xc2, 0x74, 0x30, 0xbd, 0x6b, 0xe0, + 0x18, 0x52, 0x48, 0xcb, 0x3a, 0x64, 0x96, 0x67, 0xf7, 0xcc, 0x86, 0x08, 0x5f, 0xdc, 0xa6, 0x82, + 0xa2, 0xc1, 0x4a, 0xb5, 0x0b, 0xab, 0x6b, 0x0c, 0x3c, 0x54, 0x7b, 0xcb, 0xea, 0xa2, 0x84, 0x4b, + 0x34, 0xa3, 0x1d, 0x13, 0xf5, 0x85, 0x68, 0xa4, 0x1d, 0x94, 0x70, 0xd2, 0x87, 0x79, 0x46, 0xfb, + 0xbe, 0xa0, 0x2a, 0xde, 0x5b, 0xbc, 0x75, 0x67, 0x26, 0xb5, 0xa2, 0x62, 0x65, 0xea, 0x35, 0xa0, + 0x2f, 0xac, 0x24, 0x04, 0x8d, 0x10, 0xd2, 0x84, 0xab, 0x8e, 0xa7, 0xb3, 0xec, 0x3b, 0x5d, 0xcf, + 0x67, 0x54, 0xc6, 0xbf, 0xf7, 0xe8, 0xa8, 0x0c, 0x2a, 0x21, 0x7f, 0xc1, 0xcc, 0xef, 0xea, 0x9d, + 0x49, 0x44, 0x38, 0x79, 0x6c, 0xf5, 0x1f, 0x32, 0x50, 0x08, 0xde, 0x29, 0xd9, 0x8b, 0x85, 0xfc, + 0x53, 0xd5, 0xdd, 
0x4a, 0x67, 0x64, 0x05, 0x7b, 0x50, 0x18, 0x04, 0x19, 0x41, 0x76, 0x6a, 0x86, + 0x61, 0x36, 0x10, 0x32, 0xa9, 0xbe, 0x09, 0x2b, 0x29, 0x55, 0x9d, 0x23, 0x50, 0xfa, 0x02, 0xe4, + 0x86, 0xcc, 0xd5, 0xce, 0xc0, 0xdc, 0x9c, 0x1c, 0x60, 0xa3, 0x89, 0x0a, 0x5a, 0xfd, 0xcf, 0x79, + 0x58, 0xbc, 0xdd, 0x6a, 0xed, 0x07, 0x79, 0xd7, 0x13, 0xb6, 0x62, 0x2c, 0x8f, 0xce, 0x5e, 0x60, + 0xed, 0xcc, 0x54, 0x02, 0xe7, 0x9e, 0x71, 0x25, 0xf0, 0x45, 0x98, 0xef, 0x53, 0xd1, 0xf3, 0xdb, + 0xe9, 0xcb, 0xd2, 0xfb, 0x0a, 0x8a, 0x06, 0x9b, 0x4a, 0x46, 0xf3, 0x17, 0x9e, 0x8c, 0x7e, 0x09, + 0x16, 0x64, 0x68, 0xe2, 0x0f, 0x75, 0x90, 0x3e, 0x17, 0x69, 0xaa, 0xa5, 0xc1, 0x18, 0xe0, 0x49, + 0x17, 0x8a, 0x87, 0x16, 0x77, 0xec, 0xfa, 0x50, 0xf4, 0x4c, 0xa4, 0x3e, 0xbd, 0xbe, 0x36, 0x03, + 0x0e, 0x3a, 0x1e, 0x0c, 0x1f, 0x31, 0xe2, 0x4d, 0xbe, 0x03, 0x0b, 0x3d, 0x6a, 0xb5, 0xa5, 0x42, + 0x0a, 0x4a, 0x21, 0xf8, 0xf4, 0x0a, 0x89, 0x19, 0x60, 0xed, 0xb6, 0x66, 0xaa, 0x4b, 0x49, 0xd1, + 0xb5, 0x88, 0x86, 0x62, 0x20, 0x93, 0x1c, 0xc3, 0x92, 0xde, 0xd0, 0x06, 0x53, 0x2e, 0xaa, 0x49, + 0x7c, 0x63, 0xfa, 0x7b, 0xbe, 0x18, 0x97, 0xcd, 0xcb, 0xa7, 0x27, 0x95, 0xa5, 0x38, 0x84, 0x63, + 0x52, 0xcc, 0xda, 0x6b, 0x50, 0x8a, 0xcf, 0x70, 0xaa, 0xa2, 0xce, 0x9f, 0xcd, 0xc1, 0xe5, 0x7b, + 0x1b, 0xcd, 0xe0, 0x2e, 0x69, 0xdf, 0x77, 0x1d, 0x7b, 0x44, 0xfe, 0x04, 0xe6, 0x5d, 0xeb, 0x90, + 0xba, 0x41, 0x95, 0xe3, 0xed, 0xa7, 0xd7, 0xe3, 0x18, 0xf3, 0x5a, 0x43, 0x71, 0xd6, 0xca, 0x0c, + 0xad, 0x5b, 0x03, 0xd1, 0x88, 0x25, 0xef, 0xc0, 0xc2, 0xa1, 0x65, 0x1f, 0xf9, 0x9d, 0x8e, 0xf1, + 0x52, 0x1b, 0x4f, 0x61, 0x30, 0x6a, 0xbc, 0x0e, 0x71, 0xcd, 0x03, 0x06, 0x5c, 0xa5, 0xeb, 0xa6, + 0x8c, 0xf9, 0x6c, 0xcf, 0x33, 0x28, 0x63, 0xb5, 0x6a, 0x3f, 0xc7, 0x5c, 0xf7, 0xce, 0x24, 0x22, + 0x9c, 0x3c, 0x76, 0xed, 0xeb, 0xb0, 0x18, 0x5b, 0xdc, 0x54, 0xef, 0xe1, 0x27, 0x0b, 0x50, 0xba, + 0x67, 0x75, 0x8e, 0xac, 0x73, 0x3a, 0xbd, 0x2f, 0x42, 0x5e, 0x5d, 0x6d, 0x98, 0xb0, 0x23, 0x0c, + 0x7a, 0xd5, 0xd5, 0x07, 0x6a, 0x9c, 0x4c, 0x26, 0x07, 0x16, 0x13, 0xaa, 0x22, 0xad, 0x16, 0x96, + 0x8f, 0x92, 0xc9, 0xfd, 0x00, 0x81, 0x11, 0x4d, 0xca, 0xa9, 0xe4, 0x2e, 0xdc, 0xa9, 0x6c, 0x40, + 0x89, 0xd1, 0x87, 0x43, 0x47, 0xdd, 0xca, 0x1d, 0x71, 0x53, 0x3c, 0x0a, 0x7b, 0x61, 0x30, 0x86, + 0xc3, 0x04, 0xa5, 0x8c, 0x46, 0x6c, 0xbf, 0x3f, 0x60, 0x94, 0x73, 0xe5, 0x8f, 0x0a, 0x51, 0x34, + 0xb2, 0x65, 0xe0, 0x18, 0x52, 0xc8, 0xe8, 0xad, 0xe3, 0x0e, 0x79, 0x6f, 0x57, 0xf2, 0x90, 0x01, + 0xb2, 0x72, 0x4b, 0xf9, 0x28, 0x7a, 0xdb, 0x4d, 0x60, 0x31, 0x45, 0x1d, 0xf8, 0xfe, 0xc2, 0xa7, + 0x77, 0x0b, 0x54, 0xbc, 0xc0, 0x93, 0xec, 0x1b, 0xb0, 0x12, 0x9a, 0x80, 0xe3, 0x75, 0x83, 0x00, + 0xa6, 0xa8, 0x6f, 0x4d, 0xf7, 0x93, 0x28, 0x4c, 0xd3, 0xca, 0x93, 0x20, 0x28, 0x23, 0x2d, 0x26, + 0xcb, 0x35, 0x41, 0x09, 0x29, 0xc0, 0x93, 0xdf, 0x83, 0x1c, 0xb7, 0xb8, 0x5b, 0x2e, 0x3d, 0x6d, + 0x03, 0x44, 0xbd, 0xd9, 0x30, 0x9a, 0x53, 0x41, 0x83, 0x7c, 0x46, 0xc5, 0x92, 0x7c, 0x2f, 0x03, + 0xcb, 0xba, 0xed, 0x0a, 0x69, 0xd7, 0xe1, 0x82, 0x8d, 0xca, 0x4b, 0xd3, 0xde, 0xe6, 0x07, 0x52, + 0x12, 0x6c, 0x8c, 0x3c, 0xd5, 0x8d, 0x93, 0xc4, 0x60, 0x4a, 0x60, 0x75, 0x0f, 0xa0, 0xe1, 0x77, + 0x83, 0x1d, 0x5c, 0x87, 0x15, 0xc7, 0x13, 0x94, 0x1d, 0x5b, 0x6e, 0x93, 0xda, 0xbe, 0xd7, 0xe6, + 0x6a, 0x37, 0xe7, 0xa2, 0x6a, 0xd0, 0x9d, 0x24, 0x1a, 0xd3, 0xf4, 0xd5, 0xbf, 0x9b, 0x83, 0xc5, + 0x07, 0xf5, 0x56, 0xf3, 0x9c, 0x4e, 0x21, 0x56, 0x38, 0xcb, 0x3e, 0xa1, 0x70, 0x16, 0x33, 0xb5, + 0xb9, 0xcf, 0xec, 0xc2, 0xf1, 0xe2, 0x1d, 0xcc, 0xa7, 0x73, 0x7d, 0x5b, 0xfd, 0x61, 0x0e, 0x56, + 0xf7, 0x06, 0xd4, 0x7b, 0xbb, 0xe7, 0xf0, 
0xa3, 0x58, 0xcb, 0x85, 0xaa, 0x91, 0x67, 0xce, 0xac, + 0x91, 0xc7, 0x76, 0x4e, 0xf6, 0x09, 0x3b, 0x67, 0x1d, 0x8a, 0x32, 0x72, 0xe6, 0x03, 0xcb, 0x1e, + 0xab, 0x0b, 0x3e, 0x08, 0x10, 0x18, 0xd1, 0xa8, 0xe6, 0xc0, 0xa1, 0xe8, 0xb5, 0xfc, 0x23, 0xea, + 0x4d, 0x97, 0xf8, 0xe9, 0xe6, 0xc0, 0x60, 0x2c, 0x46, 0x6c, 0xc8, 0x2d, 0x00, 0x2b, 0x6a, 0x54, + 0xd4, 0x49, 0x5f, 0xa8, 0xf1, 0x7a, 0xd4, 0xa6, 0x18, 0xa3, 0xfa, 0x75, 0xbd, 0xd9, 0x46, 0x28, + 0xc5, 0x0b, 0x15, 0xe7, 0xb8, 0x2d, 0x0b, 0xb2, 0xa6, 0xec, 0x59, 0x59, 0x53, 0xf5, 0xff, 0x8a, + 0xb0, 0xb4, 0x3f, 0x74, 0xb9, 0xc5, 0x9e, 0x65, 0x90, 0xf0, 0x59, 0x77, 0xd1, 0xc5, 0x0c, 0x24, + 0x77, 0x81, 0x06, 0x32, 0x80, 0x2b, 0xc2, 0xe5, 0x2d, 0x36, 0xe4, 0x62, 0x8b, 0x32, 0xc1, 0x4d, + 0x89, 0x24, 0x3f, 0x75, 0x0f, 0x53, 0xab, 0xd1, 0x4c, 0x73, 0xc1, 0x49, 0xac, 0xc9, 0x21, 0xac, + 0x09, 0x97, 0xd7, 0x5d, 0xd7, 0x7f, 0x14, 0x14, 0x04, 0xa2, 0xc6, 0x18, 0x13, 0xb4, 0x54, 0xcd, + 0x7c, 0xd7, 0x5a, 0x8d, 0xe6, 0x19, 0x94, 0xf8, 0x09, 0x5c, 0xc8, 0x7d, 0xb5, 0xaa, 0xb7, 0x2c, + 0xd7, 0x69, 0x5b, 0x42, 0x95, 0x14, 0x94, 0x4d, 0x2d, 0x28, 0xe6, 0x9f, 0x0f, 0x8a, 0x90, 0xad, + 0x46, 0x33, 0x4d, 0x82, 0x93, 0xc6, 0x7d, 0x5a, 0x71, 0x4e, 0x1b, 0x56, 0x42, 0xa7, 0x62, 0xf4, + 0x5e, 0x9c, 0xba, 0x9b, 0xab, 0x9e, 0xe4, 0x80, 0x69, 0x96, 0xe4, 0x3b, 0x70, 0x39, 0x6a, 0x33, + 0x32, 0x91, 0xba, 0x0a, 0x6c, 0x66, 0xc9, 0x26, 0xae, 0x9e, 0x9e, 0x54, 0x2e, 0x6f, 0xa5, 0xd9, + 0xe2, 0xb8, 0x24, 0xf2, 0xb7, 0x19, 0x58, 0x95, 0x53, 0xaa, 0x8b, 0x1e, 0xf5, 0x3e, 0x50, 0x26, + 0xc9, 0xcb, 0x8b, 0xca, 0xc2, 0xbf, 0x3d, 0x43, 0xf5, 0x33, 0xbe, 0xff, 0x6b, 0xf5, 0x14, 0x7f, + 0x9d, 0x54, 0x85, 0xfd, 0x4c, 0x69, 0x34, 0x8e, 0x4d, 0x88, 0x74, 0xe3, 0x93, 0x34, 0xef, 0xa2, + 0x34, 0x75, 0x83, 0x57, 0x3d, 0xc5, 0x02, 0xc7, 0x98, 0xae, 0x6d, 0xc1, 0xd5, 0x89, 0xb3, 0x9d, + 0x2a, 0x4b, 0xfa, 0xd3, 0x0c, 0x14, 0xd1, 0x12, 0xb4, 0xe1, 0xf4, 0x1d, 0x41, 0x6e, 0x41, 0x6e, + 0xe8, 0x39, 0xc1, 0x01, 0x7b, 0x3d, 0xf0, 0x98, 0x07, 0x9e, 0x23, 0x1e, 0x9f, 0x54, 0x96, 0x43, + 0x42, 0x2a, 0x21, 0xa8, 0x68, 0x65, 0x50, 0xa6, 0xa2, 0x78, 0x2e, 0xf8, 0x3e, 0x65, 0x12, 0xa1, + 0xa4, 0xe4, 0xa3, 0xa0, 0x0c, 0x93, 0x68, 0x4c, 0xd3, 0x57, 0x7f, 0x92, 0x85, 0xf9, 0xa6, 0x7a, + 0x2d, 0xe4, 0x5d, 0x28, 0xf4, 0xa9, 0xb0, 0xd4, 0x65, 0x89, 0x2e, 0xcf, 0xbd, 0x7c, 0xbe, 0x2b, + 0xc8, 0x3d, 0x15, 0x85, 0xdd, 0xa7, 0xc2, 0x8a, 0xfc, 0x63, 0x04, 0xc3, 0x90, 0x2b, 0xe9, 0x98, + 0xce, 0x98, 0xec, 0xac, 0xb7, 0x4b, 0x7a, 0xc6, 0xcd, 0x01, 0xb5, 0x27, 0x36, 0xc3, 0x78, 0x30, + 0xcf, 0x85, 0x25, 0x86, 0x7c, 0xf6, 0x0e, 0x61, 0x23, 0x49, 0x71, 0x8b, 0xdd, 0x20, 0xa8, 0x67, + 0x34, 0x52, 0xaa, 0xff, 0x9a, 0x01, 0xd0, 0x84, 0x0d, 0x87, 0x0b, 0xf2, 0x87, 0x63, 0x8a, 0xac, + 0x9d, 0x4f, 0x91, 0x72, 0xb4, 0x52, 0x63, 0x98, 0xee, 0x05, 0x90, 0x98, 0x12, 0x29, 0xe4, 0x1d, + 0x41, 0xfb, 0xc1, 0xe5, 0xc3, 0xeb, 0xb3, 0xae, 0x2d, 0x3a, 0x49, 0xef, 0x48, 0xb6, 0xa8, 0xb9, + 0x57, 0x3f, 0x9a, 0x0f, 0xd6, 0x24, 0x15, 0x4b, 0xbe, 0x9f, 0x81, 0x52, 0x3b, 0xb8, 0x82, 0x71, + 0x68, 0x50, 0x4b, 0xb9, 0xf3, 0xcc, 0x2e, 0x49, 0xa3, 0xc4, 0x78, 0x3b, 0x26, 0x06, 0x13, 0x42, + 0x89, 0x0f, 0x05, 0xa1, 0xbd, 0x45, 0xb0, 0xfc, 0xfa, 0xcc, 0xe7, 0x6b, 0xac, 0x6d, 0xc6, 0xb0, + 0xc6, 0x50, 0x08, 0x71, 0x63, 0x4d, 0x36, 0x33, 0x5f, 0x6e, 0x04, 0x6d, 0x39, 0xba, 0xfc, 0x3c, + 0xde, 0xa4, 0x43, 0xee, 0x02, 0x31, 0xb5, 0x98, 0x5d, 0xcb, 0x71, 0x69, 0x1b, 0xfd, 0xa1, 0xa7, + 0x4b, 0xa7, 0x85, 0xa8, 0x0b, 0x6d, 0x67, 0x8c, 0x02, 0x27, 0x8c, 0x22, 0x1b, 0x50, 0x52, 0xf3, + 0xd9, 0x1c, 0xf2, 0x58, 0x80, 0x1b, 0x2a, 0x79, 0x27, 0x86, 0xc3, 
0x04, 0x25, 0xb9, 0x09, 0x05, + 0x46, 0x07, 0xae, 0x63, 0x5b, 0xba, 0xfa, 0x90, 0x0f, 0x3a, 0xb4, 0x35, 0x0c, 0x43, 0x2c, 0x69, + 0xc0, 0x73, 0x8c, 0x1e, 0x3b, 0x32, 0xa6, 0xbf, 0xed, 0x70, 0xe1, 0xb3, 0x91, 0x72, 0x51, 0xa6, + 0xfe, 0x50, 0x3e, 0x3d, 0xa9, 0x3c, 0x87, 0x13, 0xf0, 0x38, 0x71, 0x14, 0xf9, 0x51, 0x06, 0x96, + 0x5c, 0xbf, 0xdb, 0x75, 0xbc, 0xae, 0xbe, 0x00, 0x33, 0x75, 0xcf, 0xb7, 0x9f, 0x85, 0x9f, 0xa8, + 0x35, 0xe2, 0x9c, 0xf5, 0xd1, 0x72, 0xd5, 0x28, 0x63, 0x29, 0x81, 0xc3, 0xe4, 0x24, 0xd6, 0x5e, + 0x07, 0x32, 0x3e, 0x76, 0x2a, 0x47, 0xef, 0x43, 0x29, 0xee, 0x46, 0xc8, 0x3b, 0xa1, 0x7b, 0xd2, + 0xde, 0xe1, 0x6b, 0xd3, 0x27, 0xf5, 0x9f, 0xec, 0x8f, 0xbe, 0x0d, 0x8b, 0x4d, 0xd7, 0xb2, 0x8f, + 0x9a, 0x72, 0xe7, 0xb0, 0x44, 0xab, 0x55, 0xe6, 0x89, 0xad, 0x56, 0x37, 0x20, 0xe7, 0xd8, 0x61, + 0x12, 0x17, 0xba, 0xd7, 0x3b, 0xb6, 0xef, 0xa1, 0xc2, 0x54, 0xff, 0x39, 0x63, 0xf8, 0xb7, 0x7a, + 0x8c, 0x5a, 0x6d, 0xd2, 0x84, 0xab, 0x7d, 0xca, 0xb9, 0xd5, 0xa5, 0xf5, 0x6e, 0x97, 0xd1, 0xae, + 0xfa, 0x3a, 0xe4, 0x5e, 0xa0, 0x9d, 0xa8, 0xfc, 0x78, 0x7f, 0x12, 0x11, 0x4e, 0x1e, 0x4b, 0xde, + 0x81, 0xe7, 0x0f, 0x99, 0x6f, 0xb5, 0x6d, 0x4b, 0x7a, 0x40, 0x45, 0xd1, 0xf2, 0xb7, 0x7a, 0x96, + 0xe7, 0x51, 0xd7, 0xf4, 0x88, 0xfe, 0x86, 0x61, 0xfc, 0xfc, 0xe6, 0x59, 0x84, 0x78, 0x36, 0x8f, + 0xea, 0xff, 0xe6, 0xa0, 0xa4, 0x57, 0xf1, 0x2b, 0xd2, 0x11, 0x77, 0x00, 0xc0, 0xd5, 0x7c, 0x54, + 0x96, 0x9b, 0x9d, 0xba, 0xb5, 0xb5, 0x19, 0x0e, 0xc6, 0x18, 0x23, 0x99, 0x97, 0xdb, 0x46, 0x6d, + 0x73, 0xc9, 0xbc, 0x3c, 0x50, 0x52, 0x80, 0x97, 0xa4, 0xe6, 0x65, 0x98, 0xfb, 0x9a, 0x90, 0xd4, + 0x68, 0x0f, 0x03, 0x3c, 0xf9, 0x2a, 0x2c, 0x5a, 0x42, 0x58, 0x76, 0xaf, 0x2f, 0xb5, 0x60, 0xbc, + 0x4b, 0xd8, 0x72, 0x55, 0x8f, 0x50, 0x18, 0xa7, 0x53, 0x37, 0xa7, 0xae, 0x6f, 0x1f, 0xf1, 0xb1, + 0x9b, 0x53, 0x05, 0x45, 0x83, 0x25, 0x7d, 0x98, 0x17, 0xca, 0xb8, 0xcc, 0x15, 0xcb, 0x0c, 0x5f, + 0xb1, 0xc4, 0x2c, 0x35, 0x12, 0xa7, 0x9f, 0xd1, 0x08, 0x91, 0xe2, 0xb8, 0xda, 0x2b, 0x26, 0x3b, + 0x98, 0x55, 0x9c, 0xde, 0x78, 0xf1, 0x26, 0x66, 0xf9, 0x8c, 0x46, 0x48, 0xf5, 0xbf, 0xe7, 0x80, + 0x34, 0x85, 0xe5, 0xb5, 0x2d, 0xd6, 0xbe, 0xb7, 0xd1, 0xfc, 0xac, 0x3e, 0x5e, 0x7b, 0x30, 0xfe, + 0xf1, 0xda, 0xcb, 0x93, 0x3e, 0x5e, 0xfb, 0xfc, 0xbd, 0xe1, 0x21, 0x65, 0x1e, 0x15, 0x94, 0x07, + 0xb7, 0x1f, 0xbf, 0x92, 0x9f, 0xb0, 0x75, 0x60, 0x69, 0x60, 0x09, 0xbb, 0xd7, 0x14, 0xcc, 0x12, + 0xb4, 0x3b, 0x32, 0x46, 0xfc, 0x7a, 0xe0, 0xe6, 0xf7, 0xe3, 0xc8, 0xc7, 0x27, 0x95, 0xdf, 0x3a, + 0xeb, 0xcb, 0x57, 0x31, 0x1a, 0x50, 0x5e, 0x53, 0xe4, 0xaa, 0xa9, 0x2f, 0xc9, 0x96, 0xdc, 0x02, + 0x70, 0x9d, 0x63, 0xaa, 0x43, 0x5c, 0x65, 0xfa, 0x85, 0x68, 0x6e, 0x8d, 0x10, 0x83, 0x31, 0xaa, + 0xea, 0x3a, 0x94, 0xb4, 0x93, 0x36, 0x97, 0x52, 0x15, 0xc8, 0x5b, 0x32, 0xed, 0x55, 0x7e, 0x26, + 0xaf, 0xdb, 0x1d, 0x54, 0x1e, 0x8c, 0x1a, 0x5e, 0xfd, 0x41, 0x01, 0xc2, 0x10, 0x81, 0xd8, 0x63, + 0x11, 0xe5, 0xf4, 0xdf, 0x5b, 0xdd, 0x37, 0x0c, 0xf4, 0x69, 0x1e, 0x3c, 0xc5, 0x02, 0x4b, 0xd3, + 0x03, 0xef, 0xd8, 0xb4, 0x6e, 0xdb, 0xfe, 0xd0, 0xb4, 0xed, 0x65, 0xc7, 0x7b, 0xe0, 0x93, 0x14, + 0x38, 0x61, 0x14, 0xb9, 0xab, 0xbe, 0x6c, 0x13, 0x96, 0xd4, 0xa9, 0x09, 0x9c, 0x5e, 0x38, 0xe3, + 0xcb, 0x36, 0x4d, 0x14, 0x7e, 0xce, 0xa6, 0x1f, 0x31, 0x1a, 0x4e, 0x76, 0x60, 0xe1, 0xd8, 0x77, + 0x87, 0x7d, 0x1a, 0xd4, 0x58, 0xd7, 0x26, 0x71, 0x7a, 0x4b, 0x91, 0xc4, 0x8a, 0x8e, 0x7a, 0x08, + 0x06, 0x63, 0x09, 0x85, 0x15, 0x55, 0x61, 0x70, 0xc4, 0xc8, 0xf4, 0x88, 0x99, 0xfa, 0xc8, 0x8b, + 0x93, 0xd8, 0xed, 0xfb, 0xed, 0x66, 0x92, 0xda, 0x7c, 0x76, 0x95, 0x04, 0x62, 0x9a, 0x27, 
0xf9, + 0x30, 0x03, 0x25, 0xcf, 0x6f, 0xd3, 0xc0, 0x37, 0x9b, 0x42, 0x61, 0x6b, 0xf6, 0xb0, 0xb1, 0xf6, + 0x20, 0xc6, 0x56, 0x47, 0x30, 0x61, 0x38, 0x17, 0x47, 0x61, 0x42, 0x3e, 0x39, 0x80, 0x45, 0xe1, + 0xbb, 0x66, 0x8f, 0x06, 0xd5, 0xc3, 0xeb, 0x93, 0xd6, 0xdc, 0x0a, 0xc9, 0x22, 0x4f, 0x1e, 0xc1, + 0x38, 0xc6, 0xf9, 0x10, 0x0f, 0x56, 0x9d, 0xbe, 0xd5, 0xa5, 0xfb, 0x43, 0xd7, 0xd5, 0x07, 0x52, + 0x10, 0xaf, 0x4d, 0xfc, 0x84, 0x51, 0x3a, 0x22, 0xd7, 0xec, 0x0b, 0xda, 0xa1, 0x8c, 0x7a, 0x36, + 0x8d, 0x72, 0xfb, 0x3b, 0x29, 0x4e, 0x38, 0xc6, 0x9b, 0xbc, 0x01, 0x97, 0x07, 0xcc, 0xf1, 0x95, + 0xaa, 0x5d, 0x8b, 0xeb, 0xa0, 0x56, 0x37, 0x42, 0x3f, 0x6f, 0xd8, 0x5c, 0xde, 0x4f, 0x13, 0xe0, + 0xf8, 0x18, 0x19, 0xde, 0x06, 0x40, 0x55, 0x40, 0x31, 0xe1, 0x6d, 0x30, 0x16, 0x43, 0x2c, 0xd9, + 0x85, 0x82, 0xd5, 0xe9, 0x38, 0x9e, 0xa4, 0x5c, 0x54, 0xa6, 0xf2, 0x85, 0x49, 0x4b, 0xab, 0x1b, + 0x1a, 0xcd, 0x27, 0x78, 0xc2, 0x70, 0xec, 0xda, 0xb7, 0xe0, 0xf2, 0xd8, 0xab, 0x9b, 0x2a, 0x80, + 0x6c, 0x02, 0x44, 0xfd, 0x94, 0xe4, 0x8b, 0x90, 0xe7, 0xc2, 0x62, 0x41, 0xa9, 0x20, 0x4c, 0xdf, + 0x9a, 0x12, 0x88, 0x1a, 0x27, 0xa3, 0x38, 0x2e, 0xfc, 0x41, 0x3a, 0x8a, 0x6b, 0x0a, 0x7f, 0x80, + 0x0a, 0x53, 0xfd, 0x8b, 0x3c, 0x2c, 0x04, 0x27, 0x0f, 0x8f, 0xa5, 0x39, 0x99, 0x59, 0x9b, 0x8d, + 0x0c, 0xd3, 0x27, 0x66, 0x3b, 0xc9, 0xe3, 0x22, 0x7b, 0xe1, 0xc7, 0xc5, 0x11, 0xcc, 0x0f, 0x94, + 0x33, 0x36, 0x0e, 0xea, 0x8d, 0xd9, 0x65, 0x2b, 0x76, 0xfa, 0xac, 0xd5, 0xbf, 0xd1, 0x88, 0x20, + 0x0f, 0x61, 0x89, 0x51, 0xc1, 0x46, 0x89, 0xb3, 0x69, 0x96, 0xda, 0x9d, 0xea, 0xa4, 0xc0, 0x38, + 0x4b, 0x4c, 0x4a, 0x20, 0x03, 0x28, 0xb2, 0xa0, 0x6a, 0x64, 0x5c, 0xdd, 0xd6, 0xd3, 0x2f, 0x31, + 0x2c, 0x40, 0x69, 0x4f, 0x1d, 0x3e, 0x62, 0x24, 0x44, 0x07, 0x85, 0x0d, 0x6a, 0x71, 0xb1, 0xe7, + 0xd9, 0xd4, 0x54, 0x81, 0x63, 0x41, 0x61, 0x88, 0xc2, 0x38, 0x5d, 0xf5, 0xbf, 0x32, 0xb0, 0x9a, + 0x7e, 0x7b, 0xe4, 0x08, 0xe6, 0x38, 0xb3, 0x8d, 0x35, 0xee, 0x3f, 0x3b, 0xb3, 0xd0, 0x31, 0x90, + 0xae, 0xe1, 0x36, 0x99, 0x8d, 0x52, 0x8a, 0xdc, 0x2d, 0x6d, 0xca, 0x45, 0x7a, 0xb7, 0x6c, 0x53, + 0x2e, 0x50, 0x61, 0x48, 0x23, 0x1e, 0x2b, 0xcd, 0x25, 0xda, 0x60, 0x13, 0xb1, 0xd2, 0xf3, 0x69, + 0x79, 0x93, 0x22, 0xa5, 0xea, 0x0f, 0xe6, 0xe0, 0xda, 0xe4, 0x89, 0x91, 0x6f, 0xc2, 0x72, 0x58, + 0xf2, 0x18, 0xc5, 0xfe, 0x43, 0x23, 0xbc, 0xcd, 0xdf, 0x4e, 0x60, 0x31, 0x45, 0x2d, 0x83, 0x13, + 0xd3, 0xf9, 0x1c, 0xfc, 0x91, 0x46, 0xec, 0x5a, 0x6b, 0x2b, 0xc4, 0x60, 0x8c, 0x8a, 0xd4, 0x61, + 0xc5, 0x3c, 0xb5, 0xe2, 0xc5, 0x8e, 0x58, 0xab, 0xff, 0x56, 0x12, 0x8d, 0x69, 0x7a, 0x99, 0x3a, + 0xc8, 0x20, 0x22, 0xf8, 0x96, 0x39, 0x96, 0x3a, 0x6c, 0x6b, 0x30, 0x06, 0x78, 0xb2, 0x01, 0x25, + 0xf9, 0xb3, 0x95, 0xfc, 0x78, 0x29, 0x2a, 0xff, 0xc4, 0x70, 0x98, 0xa0, 0x8c, 0xbe, 0xaa, 0xd2, + 0xc9, 0xc3, 0xf8, 0x57, 0x55, 0xb7, 0x00, 0x86, 0x9c, 0xa2, 0xf5, 0x48, 0x32, 0x31, 0x17, 0x05, + 0xe1, 0xe2, 0x0f, 0x42, 0x0c, 0xc6, 0xa8, 0xaa, 0x3f, 0xcf, 0xc0, 0x52, 0x62, 0xff, 0x92, 0x0e, + 0xcc, 0x1d, 0x6d, 0x04, 0xc9, 0xf9, 0xbd, 0x67, 0xd8, 0x2d, 0xa4, 0xad, 0xee, 0xde, 0x06, 0x47, + 0x29, 0x80, 0xbc, 0x17, 0xd6, 0x01, 0x66, 0xee, 0x83, 0x8f, 0xc7, 0x96, 0x26, 0xd6, 0x4f, 0x96, + 0x04, 0xfe, 0x65, 0x19, 0x56, 0x52, 0x8e, 0xf9, 0x1c, 0xad, 0x8d, 0xda, 0x98, 0xcc, 0x57, 0xa0, + 0x13, 0x8c, 0x29, 0xf8, 0x3e, 0x34, 0x46, 0x45, 0xba, 0x5a, 0x7b, 0xda, 0xa7, 0x36, 0x66, 0x5a, + 0x52, 0x2a, 0x41, 0x4a, 0xa9, 0xef, 0xfb, 0x19, 0x28, 0x59, 0xb1, 0xbf, 0xd5, 0x30, 0x2e, 0xf5, + 0xfe, 0x2c, 0x59, 0xd3, 0xd8, 0x3f, 0x8a, 0xe8, 0xce, 0xe1, 0x38, 0x02, 0x13, 0x42, 0x89, 0x0d, + 0xb9, 0x9e, 0x10, 
0xc1, 0xdf, 0x37, 0xec, 0x3c, 0x93, 0x1e, 0x3d, 0xdd, 0x0f, 0x22, 0x01, 0xa8, + 0x98, 0x93, 0x47, 0x50, 0xb4, 0x1e, 0x71, 0xfd, 0x57, 0x3b, 0xe6, 0x7f, 0x1d, 0x66, 0x49, 0x0e, + 0x53, 0xff, 0xda, 0x63, 0x2e, 0xc9, 0x03, 0x28, 0x46, 0xb2, 0x08, 0x83, 0x79, 0x5b, 0x7d, 0x85, + 0x6a, 0xf2, 0xf0, 0x37, 0x9e, 0xd1, 0xd7, 0xac, 0xfa, 0xf8, 0x4a, 0x80, 0xd0, 0x48, 0x22, 0x5d, + 0xc8, 0x1f, 0x59, 0x9d, 0x23, 0xcb, 0xe4, 0xe2, 0x33, 0xec, 0x8a, 0x78, 0x0f, 0x9a, 0xf6, 0x16, + 0x0a, 0x82, 0x9a, 0xbf, 0x7c, 0x75, 0x9e, 0x25, 0xb8, 0xb9, 0xae, 0x9b, 0xe1, 0xd5, 0xc5, 0xba, + 0x5a, 0xf4, 0xab, 0x93, 0x00, 0x54, 0xcc, 0xe5, 0x6a, 0x54, 0x31, 0xc6, 0x5c, 0xd6, 0xed, 0xce, + 0x5a, 0xc8, 0x88, 0xaf, 0x46, 0x41, 0x50, 0xf3, 0x97, 0x36, 0xe2, 0x07, 0x5d, 0x1b, 0x26, 0x5c, + 0x9d, 0xc1, 0x46, 0xd2, 0x0d, 0x20, 0xda, 0x46, 0x42, 0x28, 0x46, 0xb2, 0xc8, 0x3b, 0x30, 0xe7, + 0xfa, 0x5d, 0x73, 0xd1, 0x36, 0xc3, 0xa5, 0x4e, 0xd4, 0x6d, 0xa4, 0x37, 0x7a, 0xc3, 0xef, 0xa2, + 0xe4, 0x4c, 0xfe, 0x32, 0x03, 0xcb, 0x56, 0xe2, 0x8f, 0x40, 0x4c, 0x37, 0xd4, 0x0c, 0x5f, 0x87, + 0x4d, 0xfc, 0x63, 0x11, 0xdd, 0x17, 0x95, 0x44, 0x61, 0x4a, 0xb4, 0x0a, 0x1b, 0xd5, 0xbd, 0x65, + 0x79, 0x79, 0xd6, 0x2d, 0x91, 0xb8, 0xff, 0x34, 0x61, 0xa3, 0x02, 0xa1, 0x11, 0x41, 0x7e, 0x94, + 0x51, 0x47, 0x73, 0xfc, 0x3b, 0xfc, 0xf2, 0xca, 0xcc, 0xdf, 0x95, 0x4f, 0xfe, 0xef, 0x80, 0xc4, + 0x69, 0x1f, 0x27, 0xc0, 0xf4, 0x14, 0xc8, 0x0f, 0x33, 0xb0, 0x62, 0x25, 0xff, 0x64, 0xa3, 0xbc, + 0x3a, 0x6b, 0xa4, 0x36, 0xf9, 0x5f, 0x3b, 0xcc, 0xfd, 0x78, 0x12, 0x87, 0x69, 0xe9, 0x72, 0x9b, + 0xd1, 0xbe, 0xe5, 0xb8, 0xe5, 0xcb, 0x33, 0x7f, 0x52, 0x16, 0xfb, 0x4a, 0x5a, 0x6f, 0x33, 0x05, + 0x41, 0xcd, 0xbf, 0x6a, 0xc3, 0x62, 0xec, 0x0f, 0x7d, 0xce, 0xd1, 0x0a, 0x73, 0x0b, 0xe0, 0x98, + 0x32, 0xa7, 0x33, 0xda, 0xa2, 0x4c, 0x98, 0xca, 0x75, 0x78, 0x86, 0xbe, 0x15, 0x62, 0x30, 0x46, + 0xb5, 0xf9, 0x47, 0x1f, 0x7d, 0x7c, 0xfd, 0xd2, 0x4f, 0x3f, 0xbe, 0x7e, 0xe9, 0x67, 0x1f, 0x5f, + 0xbf, 0xf4, 0xdd, 0xd3, 0xeb, 0x99, 0x8f, 0x4e, 0xaf, 0x67, 0x7e, 0x7a, 0x7a, 0x3d, 0xf3, 0xb3, + 0xd3, 0xeb, 0x99, 0x7f, 0x3f, 0xbd, 0x9e, 0xf9, 0xab, 0x9f, 0x5f, 0xbf, 0xf4, 0xfb, 0x1b, 0x4f, + 0xfb, 0xc7, 0x79, 0xff, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x22, 0xad, 0xca, 0xcf, 0x73, 0x4f, 0x00, + 0x00, } func (m *AWSLambdaTrigger) Marshal() (dAtA []byte, err error) { @@ -1463,6 +1730,11 @@ func (m *AWSLambdaTrigger) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i -= len(m.RoleARN) + copy(dAtA[i:], m.RoleARN) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RoleARN))) + i-- + dAtA[i] = 0x42 if m.InvocationType != nil { i -= len(*m.InvocationType) copy(dAtA[i:], *m.InvocationType) @@ -1555,16 +1827,15 @@ func (m *ArgoWorkflowTrigger) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - { - size, err := m.GroupVersionResource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Args) > 0 { + for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Args[iNdEx]) + copy(dAtA[i:], m.Args[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Args[iNdEx]))) + i-- + dAtA[i] = 0x22 } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x22 if len(m.Parameters) > 0 { for iNdEx := len(m.Parameters) - 1; iNdEx >= 0; iNdEx-- { { @@ -1786,7 +2057,7 @@ func (m *AzureEventHubsTrigger) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *CustomTrigger) Marshal() (dAtA []byte, err error) { +func (m *AzureServiceBusTrigger) Marshal() (dAtA []byte, err error) { size 
:= m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1796,25 +2067,20 @@ func (m *CustomTrigger) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *CustomTrigger) MarshalTo(dAtA []byte) (int, error) { +func (m *AzureServiceBusTrigger) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *CustomTrigger) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *AzureServiceBusTrigger) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.DeprecatedCertFilePath) - copy(dAtA[i:], m.DeprecatedCertFilePath) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeprecatedCertFilePath))) - i-- - dAtA[i] = 0x42 - if len(m.Payload) > 0 { - for iNdEx := len(m.Payload) - 1; iNdEx >= 0; iNdEx-- { + if len(m.Parameters) > 0 { + for iNdEx := len(m.Parameters) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Payload[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Parameters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1825,10 +2091,10 @@ func (m *CustomTrigger) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x3a } } - if len(m.Parameters) > 0 { - for iNdEx := len(m.Parameters) - 1; iNdEx >= 0; iNdEx-- { + if len(m.Payload) > 0 { + for iNdEx := len(m.Payload) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Parameters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Payload[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1839,13 +2105,171 @@ func (m *CustomTrigger) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x32 } } - if len(m.Spec) > 0 { - keysForSpec := make([]string, 0, len(m.Spec)) - for k := range m.Spec { - keysForSpec = append(keysForSpec, string(k)) + if m.TLS != nil { + { + size, err := m.TLS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForSpec) - for iNdEx := len(keysForSpec) - 1; iNdEx >= 0; iNdEx-- { + i-- + dAtA[i] = 0x2a + } + i -= len(m.SubscriptionName) + copy(dAtA[i:], m.SubscriptionName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubscriptionName))) + i-- + dAtA[i] = 0x22 + i -= len(m.TopicName) + copy(dAtA[i:], m.TopicName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopicName))) + i-- + dAtA[i] = 0x1a + i -= len(m.QueueName) + copy(dAtA[i:], m.QueueName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.QueueName))) + i-- + dAtA[i] = 0x12 + if m.ConnectionString != nil { + { + size, err := m.ConnectionString.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ConditionsResetByTime) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConditionsResetByTime) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConditionsResetByTime) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Timezone) + copy(dAtA[i:], m.Timezone) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Timezone))) + i-- + dAtA[i] = 0x12 + i -= len(m.Cron) + copy(dAtA[i:], m.Cron) + i = encodeVarintGenerated(dAtA, 
i, uint64(len(m.Cron))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ConditionsResetCriteria) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConditionsResetCriteria) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConditionsResetCriteria) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ByTime != nil { + { + size, err := m.ByTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CustomTrigger) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomTrigger) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomTrigger) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Payload) > 0 { + for iNdEx := len(m.Payload) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Payload[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if len(m.Parameters) > 0 { + for iNdEx := len(m.Parameters) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Parameters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.Spec) > 0 { + keysForSpec := make([]string, 0, len(m.Spec)) + for k := range m.Spec { + keysForSpec = append(keysForSpec, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSpec) + for iNdEx := len(keysForSpec) - 1; iNdEx >= 0; iNdEx-- { v := m.Spec[string(keysForSpec[iNdEx])] baseI := i i -= len(v) @@ -1948,7 +2372,7 @@ func (m *DataFilter) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *DependencyGroup) Marshal() (dAtA []byte, err error) { +func (m *EmailTrigger) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1958,30 +2382,79 @@ func (m *DependencyGroup) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *DependencyGroup) MarshalTo(dAtA []byte) (int, error) { +func (m *EmailTrigger) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *DependencyGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *EmailTrigger) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.Dependencies) > 0 { - for iNdEx := len(m.Dependencies) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Dependencies[iNdEx]) - copy(dAtA[i:], m.Dependencies[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Dependencies[iNdEx]))) + i -= len(m.Body) + copy(dAtA[i:], m.Body) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Body))) + i-- + dAtA[i] = 0x4a + i -= len(m.Subject) + copy(dAtA[i:], m.Subject) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subject))) + i-- + dAtA[i] = 0x42 + i -= len(m.From) + 
copy(dAtA[i:], m.From) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.From))) + i-- + dAtA[i] = 0x3a + if len(m.To) > 0 { + for iNdEx := len(m.To) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.To[iNdEx]) + copy(dAtA[i:], m.To[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.To[iNdEx]))) i-- - dAtA[i] = 0x12 + dAtA[i] = 0x32 } } - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) i-- - dAtA[i] = 0xa + dAtA[i] = 0x28 + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0x22 + if m.SMTPPassword != nil { + { + size, err := m.SMTPPassword.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i -= len(m.Username) + copy(dAtA[i:], m.Username) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) + i-- + dAtA[i] = 0x12 + if len(m.Parameters) > 0 { + for iNdEx := len(m.Parameters) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Parameters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } return len(dAtA) - i, nil } @@ -2110,6 +2583,23 @@ func (m *EventDependency) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i -= len(m.FiltersLogicalOperator) + copy(dAtA[i:], m.FiltersLogicalOperator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FiltersLogicalOperator))) + i-- + dAtA[i] = 0x32 + if m.Transform != nil { + { + size, err := m.Transform.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } if m.Filters != nil { { size, err := m.Filters.MarshalToSizedBuffer(dAtA[:i]) @@ -2160,6 +2650,21 @@ func (m *EventDependencyFilter) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i -= len(m.Script) + copy(dAtA[i:], m.Script) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Script))) + i-- + dAtA[i] = 0x3a + i -= len(m.ExprLogicalOperator) + copy(dAtA[i:], m.ExprLogicalOperator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExprLogicalOperator))) + i-- + dAtA[i] = 0x32 + i -= len(m.DataLogicalOperator) + copy(dAtA[i:], m.DataLogicalOperator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DataLogicalOperator))) + i-- + dAtA[i] = 0x2a if len(m.Exprs) > 0 { for iNdEx := len(m.Exprs) - 1; iNdEx >= 0; iNdEx-- { { @@ -2215,6 +2720,39 @@ func (m *EventDependencyFilter) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *EventDependencyTransformer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventDependencyTransformer) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventDependencyTransformer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Script) + copy(dAtA[i:], m.Script) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Script))) + i-- + dAtA[i] = 0x12 + i -= len(m.JQ) + copy(dAtA[i:], m.JQ) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.JQ))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m 
*ExprFilter) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2305,11 +2843,14 @@ func (m *GitArtifact) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - i -= len(m.DeprecatedSSHKeyPath) - copy(dAtA[i:], m.DeprecatedSSHKeyPath) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeprecatedSSHKeyPath))) i-- - dAtA[i] = 0x52 + if m.InsecureIgnoreHostKey { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 if m.Remote != nil { { size, err := m.Remote.MarshalToSizedBuffer(dAtA[:i]) @@ -2676,6 +3217,18 @@ func (m *KafkaTrigger) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.SchemaRegistry != nil { + { + size, err := m.SchemaRegistry.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } if m.SASL != nil { { size, err := m.SASL.MarshalToSizedBuffer(dAtA[:i]) @@ -2693,11 +3246,13 @@ func (m *KafkaTrigger) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) i-- dAtA[i] = 0x5a - i -= len(m.PartitioningKey) - copy(dAtA[i:], m.PartitioningKey) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PartitioningKey))) - i-- - dAtA[i] = 0x52 + if m.PartitioningKey != nil { + i -= len(*m.PartitioningKey) + copy(dAtA[i:], *m.PartitioningKey) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PartitioningKey))) + i-- + dAtA[i] = 0x52 + } if len(m.Payload) > 0 { for iNdEx := len(m.Payload) - 1; iNdEx >= 0; iNdEx-- { { @@ -2983,7 +3538,7 @@ func (m *PayloadField) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *Sensor) Marshal() (dAtA []byte, err error) { +func (m *PulsarTrigger) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -2993,23 +3548,215 @@ func (m *Sensor) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Sensor) MarshalTo(dAtA []byte) (int, error) { +func (m *PulsarTrigger) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *Sensor) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *PulsarTrigger) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if m.AuthAthenzSecret != nil { + { + size, err := m.AuthAthenzSecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x62 + } + if len(m.AuthAthenzParams) > 0 { + keysForAuthAthenzParams := make([]string, 0, len(m.AuthAthenzParams)) + for k := range m.AuthAthenzParams { + keysForAuthAthenzParams = append(keysForAuthAthenzParams, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAuthAthenzParams) + for iNdEx := len(keysForAuthAthenzParams) - 1; iNdEx >= 0; iNdEx-- { + v := m.AuthAthenzParams[string(keysForAuthAthenzParams[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAuthAthenzParams[iNdEx]) + copy(dAtA[i:], keysForAuthAthenzParams[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAuthAthenzParams[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, 
uint64(baseI-i)) + i-- + dAtA[i] = 0x5a + } + } + if m.ConnectionBackoff != nil { + { + size, err := m.ConnectionBackoff.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if m.AuthTokenSecret != nil { + { + size, err := m.AuthTokenSecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.TLS != nil { + { + size, err := m.TLS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + i-- + if m.TLSValidateHostname { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + i-- + if m.TLSAllowInsecureConnection { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + if m.TLSTrustCertsSecret != nil { + { + size, err := m.TLSTrustCertsSecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.Payload) > 0 { + for iNdEx := len(m.Payload) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Payload[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Parameters) > 0 { + for iNdEx := len(m.Parameters) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Parameters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Topic) + copy(dAtA[i:], m.Topic) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Topic))) + i-- + dAtA[i] = 0x12 + i -= len(m.URL) + copy(dAtA[i:], m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.URL))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RateLimit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RateLimit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.RequestsPerUnit)) + i-- + dAtA[i] = 0x10 + i -= len(m.Unit) + copy(dAtA[i:], m.Unit) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Unit))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Sensor) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Sensor) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Sensor) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x1a @@ -3103,21 +3850,45 @@ func (m *SensorSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.LoggingFields) > 0 { + keysForLoggingFields := 
make([]string, 0, len(m.LoggingFields)) + for k := range m.LoggingFields { + keysForLoggingFields = append(keysForLoggingFields, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLoggingFields) + for iNdEx := len(keysForLoggingFields) - 1; iNdEx >= 0; iNdEx-- { + v := m.LoggingFields[string(keysForLoggingFields[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForLoggingFields[iNdEx]) + copy(dAtA[i:], keysForLoggingFields[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLoggingFields[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x42 + } + } + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x38 + } if m.Replicas != nil { i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) i-- - dAtA[i] = 0x40 + dAtA[i] = 0x30 } - i -= len(m.DeprecatedCircuit) - copy(dAtA[i:], m.DeprecatedCircuit) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeprecatedCircuit))) - i-- - dAtA[i] = 0x3a i -= len(m.EventBusName) copy(dAtA[i:], m.EventBusName) i = encodeVarintGenerated(dAtA, i, uint64(len(m.EventBusName))) i-- - dAtA[i] = 0x32 + dAtA[i] = 0x2a i-- if m.ErrorOnFailedRound { dAtA[i] = 1 @@ -3125,21 +3896,7 @@ func (m *SensorSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0 } i-- - dAtA[i] = 0x28 - if len(m.DependencyGroups) > 0 { - for iNdEx := len(m.DependencyGroups) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.DependencyGroups[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } + dAtA[i] = 0x20 if m.Template != nil { { size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) @@ -3216,7 +3973,7 @@ func (m *SensorStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *SlackTrigger) Marshal() (dAtA []byte, err error) { +func (m *SlackSender) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3226,56 +3983,30 @@ func (m *SlackTrigger) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SlackTrigger) MarshalTo(dAtA []byte) (int, error) { +func (m *SlackSender) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *SlackTrigger) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *SlackSender) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i -= len(m.Icon) + copy(dAtA[i:], m.Icon) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Icon))) i-- - dAtA[i] = 0x22 - i -= len(m.Channel) - copy(dAtA[i:], m.Channel) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Channel))) + dAtA[i] = 0x12 + i -= len(m.Username) + copy(dAtA[i:], m.Username) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) i-- - dAtA[i] = 0x1a - if m.SlackToken != nil { - { - size, err := m.SlackToken.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Parameters) > 0 { - for iNdEx := len(m.Parameters) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := 
m.Parameters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } + dAtA[i] = 0xa return len(dAtA) - i, nil } -func (m *StandardK8STrigger) Marshal() (dAtA []byte, err error) { +func (m *SlackThread) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3285,51 +4016,95 @@ func (m *StandardK8STrigger) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StandardK8STrigger) MarshalTo(dAtA []byte) (int, error) { +func (m *SlackThread) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *StandardK8STrigger) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *SlackThread) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l i-- - if m.LiveObject { + if m.BroadcastMessageToChannel { dAtA[i] = 1 } else { dAtA[i] = 0 } i-- - dAtA[i] = 0x30 - i -= len(m.PatchStrategy) - copy(dAtA[i:], m.PatchStrategy) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PatchStrategy))) + dAtA[i] = 0x10 + i -= len(m.MessageAggregationKey) + copy(dAtA[i:], m.MessageAggregationKey) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MessageAggregationKey))) i-- - dAtA[i] = 0x2a - if len(m.Parameters) > 0 { - for iNdEx := len(m.Parameters) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Parameters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SlackTrigger) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SlackTrigger) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SlackTrigger) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Sender.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i -= len(m.Operation) - copy(dAtA[i:], m.Operation) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation))) + i-- + dAtA[i] = 0x42 + { + size, err := m.Thread.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + i -= len(m.Blocks) + copy(dAtA[i:], m.Blocks) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Blocks))) + i-- + dAtA[i] = 0x32 + i -= len(m.Attachments) + copy(dAtA[i:], m.Attachments) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attachments))) + i-- + dAtA[i] = 0x2a + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x22 + i -= len(m.Channel) + copy(dAtA[i:], m.Channel) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Channel))) i-- dAtA[i] = 0x1a - if m.Source != nil { + if m.SlackToken != nil { { - size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.SlackToken.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -3339,16 +4114,87 @@ func (m *StandardK8STrigger) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } - { - size, err := 
m.GroupVersionResource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Parameters) > 0 { + for iNdEx := len(m.Parameters) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Parameters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + return len(dAtA) - i, nil +} + +func (m *StandardK8STrigger) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StandardK8STrigger) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StandardK8STrigger) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.LiveObject { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } i-- - dAtA[i] = 0xa + dAtA[i] = 0x28 + i -= len(m.PatchStrategy) + copy(dAtA[i:], m.PatchStrategy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PatchStrategy))) + i-- + dAtA[i] = 0x22 + if len(m.Parameters) > 0 { + for iNdEx := len(m.Parameters) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Parameters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Operation) + copy(dAtA[i:], m.Operation) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation))) + i-- + dAtA[i] = 0x12 + if m.Source != nil { + { + size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } return len(dAtA) - i, nil } @@ -3587,6 +4433,26 @@ func (m *Trigger) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i-- + if m.AtLeastOnce { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + if m.RateLimit != nil { + { + size, err := m.RateLimit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } if m.RetryStrategy != nil { { size, err := m.RetryStrategy.MarshalToSizedBuffer(dAtA[:i]) @@ -3705,6 +4571,14 @@ func (m *TriggerParameterSource) MarshalToSizedBuffer(dAtA []byte) (int, error) _ = i var l int _ = l + i-- + if m.UseRawData { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 if m.Value != nil { i -= len(*m.Value) copy(dAtA[i:], *m.Value) @@ -3787,7 +4661,7 @@ func (m *TriggerPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *TriggerSwitch) Marshal() (dAtA []byte, err error) { +func (m *TriggerTemplate) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -3797,60 +4671,61 @@ func (m *TriggerSwitch) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *TriggerSwitch) MarshalTo(dAtA []byte) (int, error) { +func (m *TriggerTemplate) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *TriggerSwitch) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *TriggerTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l - if len(m.All) > 0 { - for iNdEx := 
len(m.All) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.All[iNdEx]) - copy(dAtA[i:], m.All[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.All[iNdEx]))) - i-- - dAtA[i] = 0x12 + if m.Email != nil { + { + size, err := m.Email.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a } - if len(m.Any) > 0 { - for iNdEx := len(m.Any) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Any[iNdEx]) - copy(dAtA[i:], m.Any[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Any[iNdEx]))) - i-- - dAtA[i] = 0xa + if m.AzureServiceBus != nil { + { + size, err := m.AzureServiceBus.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 } - return len(dAtA) - i, nil -} - -func (m *TriggerTemplate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + if len(m.ConditionsReset) > 0 { + for iNdEx := len(m.ConditionsReset) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ConditionsReset[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } } - return dAtA[:n], nil -} - -func (m *TriggerTemplate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TriggerTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.AzureEventHubs != nil { + if m.Pulsar != nil { { - size, err := m.AzureEventHubs.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Pulsar.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -3860,9 +4735,9 @@ func (m *TriggerTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x72 } - if m.Log != nil { + if m.AzureEventHubs != nil { { - size, err := m.Log.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.AzureEventHubs.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -3872,9 +4747,9 @@ func (m *TriggerTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x6a } - if m.DeprecatedSwitch != nil { + if m.Log != nil { { - size, err := m.DeprecatedSwitch.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Log.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -4086,6 +4961,8 @@ func (m *AWSLambdaTrigger) Size() (n int) { l = len(*m.InvocationType) n += 1 + l + sovGenerated(uint64(l)) } + l = len(m.RoleARN) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -4107,8 +4984,12 @@ func (m *ArgoWorkflowTrigger) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } - l = m.GroupVersionResource.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -4182,31 +5063,92 @@ func (m *AzureEventHubsTrigger) Size() (n int) { return n } -func (m *CustomTrigger) Size() (n int) { +func (m *AzureServiceBusTrigger) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.ServerURL) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.CertSecret != nil { - l = m.CertSecret.Size() + if m.ConnectionString != nil { + l = m.ConnectionString.Size() n += 1 + l + sovGenerated(uint64(l)) } - l = len(m.ServerNameOverride) + l = 
len(m.QueueName) n += 1 + l + sovGenerated(uint64(l)) - if len(m.Spec) > 0 { - for k, v := range m.Spec { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } + l = len(m.TopicName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SubscriptionName) + n += 1 + l + sovGenerated(uint64(l)) + if m.TLS != nil { + l = m.TLS.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if len(m.Parameters) > 0 { - for _, e := range m.Parameters { + if len(m.Payload) > 0 { + for _, e := range m.Payload { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Parameters) > 0 { + for _, e := range m.Parameters { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ConditionsResetByTime) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Cron) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Timezone) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ConditionsResetCriteria) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ByTime != nil { + l = m.ByTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CustomTrigger) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ServerURL) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.CertSecret != nil { + l = m.CertSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.ServerNameOverride) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Spec) > 0 { + for k, v := range m.Spec { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Parameters) > 0 { + for _, e := range m.Parameters { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } @@ -4217,8 +5159,6 @@ func (m *CustomTrigger) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } - l = len(m.DeprecatedCertFilePath) - n += 1 + l + sovGenerated(uint64(l)) return n } @@ -4245,20 +5185,39 @@ func (m *DataFilter) Size() (n int) { return n } -func (m *DependencyGroup) Size() (n int) { +func (m *EmailTrigger) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Name) + if len(m.Parameters) > 0 { + for _, e := range m.Parameters { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Username) n += 1 + l + sovGenerated(uint64(l)) - if len(m.Dependencies) > 0 { - for _, s := range m.Dependencies { + if m.SMTPPassword != nil { + l = m.SMTPPassword.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Port)) + if len(m.To) > 0 { + for _, s := range m.To { l = len(s) n += 1 + l + sovGenerated(uint64(l)) } } + l = len(m.From) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Subject) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Body) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -4318,6 +5277,12 @@ func (m *EventDependency) Size() (n int) { l = m.Filters.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.Transform != nil { + l = m.Transform.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.FiltersLogicalOperator) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -4347,6 +5312,25 @@ func (m *EventDependencyFilter) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + l = len(m.DataLogicalOperator) + 
n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ExprLogicalOperator) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Script) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EventDependencyTransformer) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.JQ) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Script) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -4408,8 +5392,7 @@ func (m *GitArtifact) Size() (n int) { l = m.Remote.Size() n += 1 + l + sovGenerated(uint64(l)) } - l = len(m.DeprecatedSSHKeyPath) - n += 1 + l + sovGenerated(uint64(l)) + n += 2 return n } @@ -4547,14 +5530,20 @@ func (m *KafkaTrigger) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } - l = len(m.PartitioningKey) - n += 1 + l + sovGenerated(uint64(l)) + if m.PartitioningKey != nil { + l = len(*m.PartitioningKey) + n += 1 + l + sovGenerated(uint64(l)) + } l = len(m.Version) n += 1 + l + sovGenerated(uint64(l)) if m.SASL != nil { l = m.SASL.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.SchemaRegistry != nil { + l = m.SchemaRegistry.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -4643,6 +5632,73 @@ func (m *PayloadField) Size() (n int) { return n } +func (m *PulsarTrigger) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.URL) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Topic) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Parameters) > 0 { + for _, e := range m.Parameters { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Payload) > 0 { + for _, e := range m.Payload { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.TLSTrustCertsSecret != nil { + l = m.TLSTrustCertsSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + n += 2 + if m.TLS != nil { + l = m.TLS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AuthTokenSecret != nil { + l = m.AuthTokenSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ConnectionBackoff != nil { + l = m.ConnectionBackoff.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AuthAthenzParams) > 0 { + for k, v := range m.AuthAthenzParams { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.AuthAthenzSecret != nil { + l = m.AuthAthenzSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RateLimit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Unit) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.RequestsPerUnit)) + return n +} + func (m *Sensor) Size() (n int) { if m == nil { return 0 @@ -4697,20 +5753,23 @@ func (m *SensorSpec) Size() (n int) { l = m.Template.Size() n += 1 + l + sovGenerated(uint64(l)) } - if len(m.DependencyGroups) > 0 { - for _, e := range m.DependencyGroups { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } n += 2 l = len(m.EventBusName) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DeprecatedCircuit) - n += 1 + l + sovGenerated(uint64(l)) if m.Replicas != nil { n += 1 + sovGenerated(uint64(*m.Replicas)) } + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + if len(m.LoggingFields) > 0 { + for k, v := range m.LoggingFields { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + 
sovGenerated(uint64(mapEntrySize)) + } + } return n } @@ -4725,6 +5784,31 @@ func (m *SensorStatus) Size() (n int) { return n } +func (m *SlackSender) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Username) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Icon) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SlackThread) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.MessageAggregationKey) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + func (m *SlackTrigger) Size() (n int) { if m == nil { return 0 @@ -4745,6 +5829,14 @@ func (m *SlackTrigger) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Message) n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Attachments) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Blocks) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Thread.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Sender.Size() + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -4754,8 +5846,6 @@ func (m *StandardK8STrigger) Size() (n int) { } var l int _ = l - l = m.GroupVersionResource.Size() - n += 1 + l + sovGenerated(uint64(l)) if m.Source != nil { l = m.Source.Size() n += 1 + l + sovGenerated(uint64(l)) @@ -4883,6 +5973,11 @@ func (m *Trigger) Size() (n int) { l = m.RetryStrategy.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.RateLimit != nil { + l = m.RateLimit.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 return n } @@ -4923,6 +6018,7 @@ func (m *TriggerParameterSource) Size() (n int) { l = len(*m.Value) n += 1 + l + sovGenerated(uint64(l)) } + n += 2 return n } @@ -4943,27 +6039,6 @@ func (m *TriggerPolicy) Size() (n int) { return n } -func (m *TriggerSwitch) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Any) > 0 { - for _, s := range m.Any { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.All) > 0 { - for _, s := range m.All { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - func (m *TriggerTemplate) Size() (n int) { if m == nil { return 0 @@ -5010,10 +6085,6 @@ func (m *TriggerTemplate) Size() (n int) { l = m.OpenWhisk.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.DeprecatedSwitch != nil { - l = m.DeprecatedSwitch.Size() - n += 1 + l + sovGenerated(uint64(l)) - } if m.Log != nil { l = m.Log.Size() n += 1 + l + sovGenerated(uint64(l)) @@ -5022,6 +6093,24 @@ func (m *TriggerTemplate) Size() (n int) { l = m.AzureEventHubs.Size() n += 1 + l + sovGenerated(uint64(l)) } + if m.Pulsar != nil { + l = m.Pulsar.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ConditionsReset) > 0 { + for _, e := range m.ConditionsReset { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.AzureServiceBus != nil { + l = m.AzureServiceBus.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.Email != nil { + l = m.Email.Size() + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -5065,6 +6154,7 @@ func (this *AWSLambdaTrigger) String() string { `Payload:` + repeatedStringForPayload + `,`, `Parameters:` + repeatedStringForParameters + `,`, `InvocationType:` + valueToStringGenerated(this.InvocationType) + `,`, + `RoleARN:` + fmt.Sprintf("%v", this.RoleARN) + `,`, `}`, }, "") return s @@ -5082,7 +6172,7 @@ func (this *ArgoWorkflowTrigger) String() string { `Source:` + strings.Replace(this.Source.String(), "ArtifactLocation", "ArtifactLocation", 1) + `,`, `Operation:` + fmt.Sprintf("%v", this.Operation) + `,`, `Parameters:` + 
repeatedStringForParameters + `,`, - `GroupVersionResource:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.GroupVersionResource), "GroupVersionResource", "v11.GroupVersionResource", 1), `&`, ``, 1) + `,`, + `Args:` + fmt.Sprintf("%v", this.Args) + `,`, `}`, }, "") return s @@ -5128,6 +6218,53 @@ func (this *AzureEventHubsTrigger) String() string { }, "") return s } +func (this *AzureServiceBusTrigger) String() string { + if this == nil { + return "nil" + } + repeatedStringForPayload := "[]TriggerParameter{" + for _, f := range this.Payload { + repeatedStringForPayload += strings.Replace(strings.Replace(f.String(), "TriggerParameter", "TriggerParameter", 1), `&`, ``, 1) + "," + } + repeatedStringForPayload += "}" + repeatedStringForParameters := "[]TriggerParameter{" + for _, f := range this.Parameters { + repeatedStringForParameters += strings.Replace(strings.Replace(f.String(), "TriggerParameter", "TriggerParameter", 1), `&`, ``, 1) + "," + } + repeatedStringForParameters += "}" + s := strings.Join([]string{`&AzureServiceBusTrigger{`, + `ConnectionString:` + strings.Replace(fmt.Sprintf("%v", this.ConnectionString), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `QueueName:` + fmt.Sprintf("%v", this.QueueName) + `,`, + `TopicName:` + fmt.Sprintf("%v", this.TopicName) + `,`, + `SubscriptionName:` + fmt.Sprintf("%v", this.SubscriptionName) + `,`, + `TLS:` + strings.Replace(fmt.Sprintf("%v", this.TLS), "TLSConfig", "common.TLSConfig", 1) + `,`, + `Payload:` + repeatedStringForPayload + `,`, + `Parameters:` + repeatedStringForParameters + `,`, + `}`, + }, "") + return s +} +func (this *ConditionsResetByTime) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConditionsResetByTime{`, + `Cron:` + fmt.Sprintf("%v", this.Cron) + `,`, + `Timezone:` + fmt.Sprintf("%v", this.Timezone) + `,`, + `}`, + }, "") + return s +} +func (this *ConditionsResetCriteria) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConditionsResetCriteria{`, + `ByTime:` + strings.Replace(this.ByTime.String(), "ConditionsResetByTime", "ConditionsResetByTime", 1) + `,`, + `}`, + }, "") + return s +} func (this *CustomTrigger) String() string { if this == nil { return "nil" @@ -5160,7 +6297,6 @@ func (this *CustomTrigger) String() string { `Spec:` + mapStringForSpec + `,`, `Parameters:` + repeatedStringForParameters + `,`, `Payload:` + repeatedStringForPayload + `,`, - `DeprecatedCertFilePath:` + fmt.Sprintf("%v", this.DeprecatedCertFilePath) + `,`, `}`, }, "") return s @@ -5179,14 +6315,26 @@ func (this *DataFilter) String() string { }, "") return s } -func (this *DependencyGroup) String() string { +func (this *EmailTrigger) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&DependencyGroup{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Dependencies:` + fmt.Sprintf("%v", this.Dependencies) + `,`, - `}`, + repeatedStringForParameters := "[]TriggerParameter{" + for _, f := range this.Parameters { + repeatedStringForParameters += strings.Replace(strings.Replace(f.String(), "TriggerParameter", "TriggerParameter", 1), `&`, ``, 1) + "," + } + repeatedStringForParameters += "}" + s := strings.Join([]string{`&EmailTrigger{`, + `Parameters:` + repeatedStringForParameters + `,`, + `Username:` + fmt.Sprintf("%v", this.Username) + `,`, + `SMTPPassword:` + strings.Replace(fmt.Sprintf("%v", this.SMTPPassword), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, 
+ `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `To:` + fmt.Sprintf("%v", this.To) + `,`, + `From:` + fmt.Sprintf("%v", this.From) + `,`, + `Subject:` + fmt.Sprintf("%v", this.Subject) + `,`, + `Body:` + fmt.Sprintf("%v", this.Body) + `,`, + `}`, }, "") return s } @@ -5199,6 +6347,8 @@ func (this *EventDependency) String() string { `EventSourceName:` + fmt.Sprintf("%v", this.EventSourceName) + `,`, `EventName:` + fmt.Sprintf("%v", this.EventName) + `,`, `Filters:` + strings.Replace(this.Filters.String(), "EventDependencyFilter", "EventDependencyFilter", 1) + `,`, + `Transform:` + strings.Replace(this.Transform.String(), "EventDependencyTransformer", "EventDependencyTransformer", 1) + `,`, + `FiltersLogicalOperator:` + fmt.Sprintf("%v", this.FiltersLogicalOperator) + `,`, `}`, }, "") return s @@ -5222,6 +6372,20 @@ func (this *EventDependencyFilter) String() string { `Context:` + strings.Replace(fmt.Sprintf("%v", this.Context), "EventContext", "EventContext", 1) + `,`, `Data:` + repeatedStringForData + `,`, `Exprs:` + repeatedStringForExprs + `,`, + `DataLogicalOperator:` + fmt.Sprintf("%v", this.DataLogicalOperator) + `,`, + `ExprLogicalOperator:` + fmt.Sprintf("%v", this.ExprLogicalOperator) + `,`, + `Script:` + fmt.Sprintf("%v", this.Script) + `,`, + `}`, + }, "") + return s +} +func (this *EventDependencyTransformer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventDependencyTransformer{`, + `JQ:` + fmt.Sprintf("%v", this.JQ) + `,`, + `Script:` + fmt.Sprintf("%v", this.Script) + `,`, `}`, }, "") return s @@ -5266,7 +6430,7 @@ func (this *GitArtifact) String() string { `Tag:` + fmt.Sprintf("%v", this.Tag) + `,`, `Ref:` + fmt.Sprintf("%v", this.Ref) + `,`, `Remote:` + strings.Replace(this.Remote.String(), "GitRemoteConfig", "GitRemoteConfig", 1) + `,`, - `DeprecatedSSHKeyPath:` + fmt.Sprintf("%v", this.DeprecatedSSHKeyPath) + `,`, + `InsecureIgnoreHostKey:` + fmt.Sprintf("%v", this.InsecureIgnoreHostKey) + `,`, `}`, }, "") return s @@ -5382,9 +6546,10 @@ func (this *KafkaTrigger) String() string { `FlushFrequency:` + fmt.Sprintf("%v", this.FlushFrequency) + `,`, `TLS:` + strings.Replace(fmt.Sprintf("%v", this.TLS), "TLSConfig", "common.TLSConfig", 1) + `,`, `Payload:` + repeatedStringForPayload + `,`, - `PartitioningKey:` + fmt.Sprintf("%v", this.PartitioningKey) + `,`, + `PartitioningKey:` + valueToStringGenerated(this.PartitioningKey) + `,`, `Version:` + fmt.Sprintf("%v", this.Version) + `,`, `SASL:` + strings.Replace(fmt.Sprintf("%v", this.SASL), "SASLConfig", "common.SASLConfig", 1) + `,`, + `SchemaRegistry:` + strings.Replace(fmt.Sprintf("%v", this.SchemaRegistry), "SchemaRegistryConfig", "common.SchemaRegistryConfig", 1) + `,`, `}`, }, "") return s @@ -5460,6 +6625,58 @@ func (this *PayloadField) String() string { }, "") return s } +func (this *PulsarTrigger) String() string { + if this == nil { + return "nil" + } + repeatedStringForParameters := "[]TriggerParameter{" + for _, f := range this.Parameters { + repeatedStringForParameters += strings.Replace(strings.Replace(f.String(), "TriggerParameter", "TriggerParameter", 1), `&`, ``, 1) + "," + } + repeatedStringForParameters += "}" + repeatedStringForPayload := "[]TriggerParameter{" + for _, f := range this.Payload { + repeatedStringForPayload += strings.Replace(strings.Replace(f.String(), "TriggerParameter", "TriggerParameter", 1), `&`, ``, 1) + "," + } + repeatedStringForPayload += "}" + keysForAuthAthenzParams := make([]string, 0, len(this.AuthAthenzParams)) + for k := range 
this.AuthAthenzParams { + keysForAuthAthenzParams = append(keysForAuthAthenzParams, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAuthAthenzParams) + mapStringForAuthAthenzParams := "map[string]string{" + for _, k := range keysForAuthAthenzParams { + mapStringForAuthAthenzParams += fmt.Sprintf("%v: %v,", k, this.AuthAthenzParams[k]) + } + mapStringForAuthAthenzParams += "}" + s := strings.Join([]string{`&PulsarTrigger{`, + `URL:` + fmt.Sprintf("%v", this.URL) + `,`, + `Topic:` + fmt.Sprintf("%v", this.Topic) + `,`, + `Parameters:` + repeatedStringForParameters + `,`, + `Payload:` + repeatedStringForPayload + `,`, + `TLSTrustCertsSecret:` + strings.Replace(fmt.Sprintf("%v", this.TLSTrustCertsSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `TLSAllowInsecureConnection:` + fmt.Sprintf("%v", this.TLSAllowInsecureConnection) + `,`, + `TLSValidateHostname:` + fmt.Sprintf("%v", this.TLSValidateHostname) + `,`, + `TLS:` + strings.Replace(fmt.Sprintf("%v", this.TLS), "TLSConfig", "common.TLSConfig", 1) + `,`, + `AuthTokenSecret:` + strings.Replace(fmt.Sprintf("%v", this.AuthTokenSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `ConnectionBackoff:` + strings.Replace(fmt.Sprintf("%v", this.ConnectionBackoff), "Backoff", "common.Backoff", 1) + `,`, + `AuthAthenzParams:` + mapStringForAuthAthenzParams + `,`, + `AuthAthenzSecret:` + strings.Replace(fmt.Sprintf("%v", this.AuthAthenzSecret), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RateLimit) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RateLimit{`, + `Unit:` + fmt.Sprintf("%v", this.Unit) + `,`, + `RequestsPerUnit:` + fmt.Sprintf("%v", this.RequestsPerUnit) + `,`, + `}`, + }, "") + return s +} func (this *Sensor) String() string { if this == nil { return "nil" @@ -5502,20 +6719,25 @@ func (this *SensorSpec) String() string { repeatedStringForTriggers += strings.Replace(strings.Replace(f.String(), "Trigger", "Trigger", 1), `&`, ``, 1) + "," } repeatedStringForTriggers += "}" - repeatedStringForDependencyGroups := "[]DependencyGroup{" - for _, f := range this.DependencyGroups { - repeatedStringForDependencyGroups += strings.Replace(strings.Replace(f.String(), "DependencyGroup", "DependencyGroup", 1), `&`, ``, 1) + "," + keysForLoggingFields := make([]string, 0, len(this.LoggingFields)) + for k := range this.LoggingFields { + keysForLoggingFields = append(keysForLoggingFields, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLoggingFields) + mapStringForLoggingFields := "map[string]string{" + for _, k := range keysForLoggingFields { + mapStringForLoggingFields += fmt.Sprintf("%v: %v,", k, this.LoggingFields[k]) } - repeatedStringForDependencyGroups += "}" + mapStringForLoggingFields += "}" s := strings.Join([]string{`&SensorSpec{`, `Dependencies:` + repeatedStringForDependencies + `,`, `Triggers:` + repeatedStringForTriggers + `,`, `Template:` + strings.Replace(this.Template.String(), "Template", "Template", 1) + `,`, - `DependencyGroups:` + repeatedStringForDependencyGroups + `,`, `ErrorOnFailedRound:` + fmt.Sprintf("%v", this.ErrorOnFailedRound) + `,`, `EventBusName:` + fmt.Sprintf("%v", this.EventBusName) + `,`, - `DeprecatedCircuit:` + fmt.Sprintf("%v", this.DeprecatedCircuit) + `,`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `LoggingFields:` + mapStringForLoggingFields + `,`, `}`, }, "") return s @@ 
-5530,6 +6752,28 @@ func (this *SensorStatus) String() string { }, "") return s } +func (this *SlackSender) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SlackSender{`, + `Username:` + fmt.Sprintf("%v", this.Username) + `,`, + `Icon:` + fmt.Sprintf("%v", this.Icon) + `,`, + `}`, + }, "") + return s +} +func (this *SlackThread) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SlackThread{`, + `MessageAggregationKey:` + fmt.Sprintf("%v", this.MessageAggregationKey) + `,`, + `BroadcastMessageToChannel:` + fmt.Sprintf("%v", this.BroadcastMessageToChannel) + `,`, + `}`, + }, "") + return s +} func (this *SlackTrigger) String() string { if this == nil { return "nil" @@ -5544,6 +6788,10 @@ func (this *SlackTrigger) String() string { `SlackToken:` + strings.Replace(fmt.Sprintf("%v", this.SlackToken), "SecretKeySelector", "v1.SecretKeySelector", 1) + `,`, `Channel:` + fmt.Sprintf("%v", this.Channel) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Attachments:` + fmt.Sprintf("%v", this.Attachments) + `,`, + `Blocks:` + fmt.Sprintf("%v", this.Blocks) + `,`, + `Thread:` + strings.Replace(strings.Replace(this.Thread.String(), "SlackThread", "SlackThread", 1), `&`, ``, 1) + `,`, + `Sender:` + strings.Replace(strings.Replace(this.Sender.String(), "SlackSender", "SlackSender", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -5558,7 +6806,6 @@ func (this *StandardK8STrigger) String() string { } repeatedStringForParameters += "}" s := strings.Join([]string{`&StandardK8STrigger{`, - `GroupVersionResource:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.GroupVersionResource), "GroupVersionResource", "v11.GroupVersionResource", 1), `&`, ``, 1) + `,`, `Source:` + strings.Replace(this.Source.String(), "ArtifactLocation", "ArtifactLocation", 1) + `,`, `Operation:` + fmt.Sprintf("%v", this.Operation) + `,`, `Parameters:` + repeatedStringForParameters + `,`, @@ -5648,6 +6895,8 @@ func (this *Trigger) String() string { `Parameters:` + repeatedStringForParameters + `,`, `Policy:` + strings.Replace(this.Policy.String(), "TriggerPolicy", "TriggerPolicy", 1) + `,`, `RetryStrategy:` + strings.Replace(fmt.Sprintf("%v", this.RetryStrategy), "Backoff", "common.Backoff", 1) + `,`, + `RateLimit:` + strings.Replace(this.RateLimit.String(), "RateLimit", "RateLimit", 1) + `,`, + `AtLeastOnce:` + fmt.Sprintf("%v", this.AtLeastOnce) + `,`, `}`, }, "") return s @@ -5675,6 +6924,7 @@ func (this *TriggerParameterSource) String() string { `DataKey:` + fmt.Sprintf("%v", this.DataKey) + `,`, `DataTemplate:` + fmt.Sprintf("%v", this.DataTemplate) + `,`, `Value:` + valueToStringGenerated(this.Value) + `,`, + `UseRawData:` + fmt.Sprintf("%v", this.UseRawData) + `,`, `}`, }, "") return s @@ -5690,21 +6940,15 @@ func (this *TriggerPolicy) String() string { }, "") return s } -func (this *TriggerSwitch) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TriggerSwitch{`, - `Any:` + fmt.Sprintf("%v", this.Any) + `,`, - `All:` + fmt.Sprintf("%v", this.All) + `,`, - `}`, - }, "") - return s -} func (this *TriggerTemplate) String() string { if this == nil { return "nil" } + repeatedStringForConditionsReset := "[]ConditionsResetCriteria{" + for _, f := range this.ConditionsReset { + repeatedStringForConditionsReset += strings.Replace(strings.Replace(f.String(), "ConditionsResetCriteria", "ConditionsResetCriteria", 1), `&`, ``, 1) + "," + } + repeatedStringForConditionsReset += "}" s := 
strings.Join([]string{`&TriggerTemplate{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Conditions:` + fmt.Sprintf("%v", this.Conditions) + `,`, @@ -5717,9 +6961,12 @@ func (this *TriggerTemplate) String() string { `NATS:` + strings.Replace(this.NATS.String(), "NATSTrigger", "NATSTrigger", 1) + `,`, `Slack:` + strings.Replace(this.Slack.String(), "SlackTrigger", "SlackTrigger", 1) + `,`, `OpenWhisk:` + strings.Replace(this.OpenWhisk.String(), "OpenWhiskTrigger", "OpenWhiskTrigger", 1) + `,`, - `DeprecatedSwitch:` + strings.Replace(this.DeprecatedSwitch.String(), "TriggerSwitch", "TriggerSwitch", 1) + `,`, `Log:` + strings.Replace(this.Log.String(), "LogTrigger", "LogTrigger", 1) + `,`, `AzureEventHubs:` + strings.Replace(this.AzureEventHubs.String(), "AzureEventHubsTrigger", "AzureEventHubsTrigger", 1) + `,`, + `Pulsar:` + strings.Replace(this.Pulsar.String(), "PulsarTrigger", "PulsarTrigger", 1) + `,`, + `ConditionsReset:` + repeatedStringForConditionsReset + `,`, + `AzureServiceBus:` + strings.Replace(this.AzureServiceBus.String(), "AzureServiceBusTrigger", "AzureServiceBusTrigger", 1) + `,`, + `Email:` + strings.Replace(this.Email.String(), "EmailTrigger", "EmailTrigger", 1) + `,`, `}`, }, "") return s @@ -6009,6 +7256,38 @@ func (m *AWSLambdaTrigger) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.InvocationType = &s iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleARN", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RoleARN = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -6163,9 +7442,9 @@ func (m *ArgoWorkflowTrigger) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GroupVersionResource", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6175,24 +7454,23 @@ func (m *ArgoWorkflowTrigger) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.GroupVersionResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -6768,7 +8046,7 @@ func (m *AzureEventHubsTrigger) Unmarshal(dAtA []byte) error { } return nil } -func (m *CustomTrigger) Unmarshal(dAtA []byte) error { +func (m *AzureServiceBusTrigger) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6791,15 +8069,51 @@ func (m 
*CustomTrigger) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CustomTrigger: wiretype end group for non-group") + return fmt.Errorf("proto: AzureServiceBusTrigger: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CustomTrigger: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AzureServiceBusTrigger: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServerURL", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionString", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConnectionString == nil { + m.ConnectionString = &v1.SecretKeySelector{} + } + if err := m.ConnectionString.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field QueueName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6827,13 +8141,13 @@ func (m *CustomTrigger) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ServerURL = string(dAtA[iNdEx:postIndex]) + m.QueueName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Secure", wireType) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopicName", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6843,15 +8157,59 @@ func (m *CustomTrigger) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.Secure = bool(v != 0) - case 3: + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TopicName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CertSecret", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SubscriptionName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SubscriptionName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TLS", 
wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6878,18 +8236,18 @@ func (m *CustomTrigger) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.CertSecret == nil { - m.CertSecret = &v1.SecretKeySelector{} + if m.TLS == nil { + m.TLS = &common.TLSConfig{} } - if err := m.CertSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServerNameOverride", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6899,27 +8257,29 @@ func (m *CustomTrigger) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.ServerNameOverride = string(dAtA[iNdEx:postIndex]) + m.Payload = append(m.Payload, TriggerParameter{}) + if err := m.Payload[len(m.Payload)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6946,109 +8306,66 @@ func (m *CustomTrigger) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Spec == nil { - m.Spec = make(map[string]string) + m.Parameters = append(m.Parameters, TriggerParameter{}) + if err := m.Parameters[len(m.Parameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := 
int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Spec[mapkey] = mapvalue iNdEx = postIndex - case 6: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConditionsResetByTime) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConditionsResetByTime: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConditionsResetByTime: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cron", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7058,31 +8375,29 @@ func (m *CustomTrigger) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Parameters = append(m.Parameters, TriggerParameter{}) - if err := m.Parameters[len(m.Parameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Cron = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 7: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Timezone", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7092,31 +8407,79 @@ func (m *CustomTrigger) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return 
ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Payload = append(m.Payload, TriggerParameter{}) - if err := m.Payload[len(m.Payload)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Timezone = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { return err } - iNdEx = postIndex - case 8: + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConditionsResetCriteria) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConditionsResetCriteria: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConditionsResetCriteria: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedCertFilePath", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ByTime", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7126,23 +8489,27 @@ func (m *CustomTrigger) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.DeprecatedCertFilePath = string(dAtA[iNdEx:postIndex]) + if m.ByTime == nil { + m.ByTime = &ConditionsResetByTime{} + } + if err := m.ByTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -7165,7 +8532,7 @@ func (m *CustomTrigger) Unmarshal(dAtA []byte) error { } return nil } -func (m *DataFilter) Unmarshal(dAtA []byte) error { +func (m *CustomTrigger) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7188,15 +8555,15 @@ func (m *DataFilter) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DataFilter: wiretype end group for non-group") + return fmt.Errorf("proto: CustomTrigger: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DataFilter: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CustomTrigger: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ServerURL", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -7224,13 +8591,13 @@ func (m *DataFilter) Unmarshal(dAtA []byte) error { if postIndex > l { return 
io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + m.ServerURL = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Secure", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7240,29 +8607,17 @@ func (m *DataFilter) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = JSONType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.Secure = bool(v != 0) case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CertSecret", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7272,27 +8627,31 @@ func (m *DataFilter) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Value = append(m.Value, string(dAtA[iNdEx:postIndex])) + if m.CertSecret == nil { + m.CertSecret = &v1.SecretKeySelector{} + } + if err := m.CertSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Comparator", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ServerNameOverride", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -7320,13 +8679,13 @@ func (m *DataFilter) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Comparator = Comparator(dAtA[iNdEx:postIndex]) + m.ServerNameOverride = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7336,79 +8695,124 @@ func (m *DataFilter) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Template = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - 
if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DependencyGroup) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + if m.Spec == nil { + m.Spec = make(map[string]string) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DependencyGroup: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DependencyGroup: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Spec[mapkey] = mapvalue + iNdEx = postIndex + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7418,29 +8822,31 @@ func (m *DependencyGroup) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } 
- intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Parameters = append(m.Parameters, TriggerParameter{}) + if err := m.Parameters[len(m.Parameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Dependencies", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7450,23 +8856,25 @@ func (m *DependencyGroup) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Dependencies = append(m.Dependencies, string(dAtA[iNdEx:postIndex])) + m.Payload = append(m.Payload, TriggerParameter{}) + if err := m.Payload[len(m.Payload)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -7489,7 +8897,7 @@ func (m *DependencyGroup) Unmarshal(dAtA []byte) error { } return nil } -func (m *Event) Unmarshal(dAtA []byte) error { +func (m *DataFilter) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -7512,17 +8920,17 @@ func (m *Event) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Event: wiretype end group for non-group") + return fmt.Errorf("proto: DataFilter: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DataFilter: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7532,33 +8940,29 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Context == nil { - m.Context = &EventContext{} - } - if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - var byteLen int + var stringLen uint64 for shift := uint(0); ; 
shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7568,79 +8972,27 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + byteLen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } + m.Type = JSONType(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EventContext) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EventContext: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EventContext: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -7668,11 +9020,11 @@ func (m *EventContext) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ID = string(dAtA[iNdEx:postIndex]) + m.Value = append(m.Value, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Comparator", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -7700,11 +9052,11 @@ func (m *EventContext) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Source = string(dAtA[iNdEx:postIndex]) + m.Comparator = Comparator(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SpecVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -7732,13 +9084,63 @@ func (m *EventContext) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SpecVersion = string(dAtA[iNdEx:postIndex]) + m.Template = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *EmailTrigger) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EmailTrigger: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EmailTrigger: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7748,27 +9150,29 @@ func (m *EventContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = string(dAtA[iNdEx:postIndex]) + m.Parameters = append(m.Parameters, TriggerParameter{}) + if err := m.Parameters[len(m.Parameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DataContentType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -7796,13 +9200,13 @@ func (m *EventContext) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DataContentType = string(dAtA[iNdEx:postIndex]) + m.Username = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subject", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SMTPPassword", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7812,29 +9216,33 @@ func (m *EventContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Subject = string(dAtA[iNdEx:postIndex]) + if m.SMTPPassword == nil { + m.SMTPPassword = &v1.SecretKeySelector{} + } + if err := m.SMTPPassword.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 7: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift 
>= 64 { return ErrIntOverflowGenerated @@ -7844,78 +9252,46 @@ func (m *EventContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Time.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Host = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EventDependency) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= int32(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EventDependency: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EventDependency: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -7943,11 +9319,11 @@ func (m *EventDependency) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.To = append(m.To, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 2: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EventSourceName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -7975,11 +9351,11 @@ func (m *EventDependency) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.EventSourceName = string(dAtA[iNdEx:postIndex]) + m.From = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EventName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Subject", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -8007,13 +9383,13 @@ func (m *EventDependency) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.EventName = string(dAtA[iNdEx:postIndex]) + m.Subject = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 9: if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Body", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8023,27 +9399,23 @@ func (m *EventDependency) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Filters == nil { - m.Filters = &EventDependencyFilter{} - } - if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Body = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -8066,7 +9438,7 @@ func (m *EventDependency) Unmarshal(dAtA []byte) error { } return nil } -func (m *EventDependencyFilter) Unmarshal(dAtA []byte) error { +func (m *Event) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8089,49 +9461,13 @@ func (m *EventDependencyFilter) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EventDependencyFilter: wiretype end group for non-group") + return fmt.Errorf("proto: Event: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EventDependencyFilter: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Time == nil { - m.Time = &TimeFilter{} - } - if err := m.Time.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } @@ -8167,45 +9503,11 @@ func (m *EventDependencyFilter) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 3: + case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data, DataFilter{}) - if err := m.Data[len(m.Data)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType 
= %d for field Exprs", wireType) - } - var msglen int + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8215,24 +9517,24 @@ func (m *EventDependencyFilter) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Exprs = append(m.Exprs, ExprFilter{}) - if err := m.Exprs[len(m.Exprs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} } iNdEx = postIndex default: @@ -8256,7 +9558,7 @@ func (m *EventDependencyFilter) Unmarshal(dAtA []byte) error { } return nil } -func (m *ExprFilter) Unmarshal(dAtA []byte) error { +func (m *EventContext) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8279,15 +9581,15 @@ func (m *ExprFilter) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExprFilter: wiretype end group for non-group") + return fmt.Errorf("proto: EventContext: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExprFilter: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EventContext: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Expr", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -8315,13 +9617,13 @@ func (m *ExprFilter) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Expr = string(dAtA[iNdEx:postIndex]) + m.ID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8331,79 +9633,27 @@ func (m *ExprFilter) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Fields = append(m.Fields, PayloadField{}) - if err := m.Fields[len(m.Fields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Source = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FileArtifact) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx 
- var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FileArtifact: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FileArtifact: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SpecVersion", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -8431,61 +9681,11 @@ func (m *FileArtifact) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + m.SpecVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GitArtifact) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GitArtifact: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GitArtifact: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -8513,11 +9713,11 @@ func (m *GitArtifact) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.URL = string(dAtA[iNdEx:postIndex]) + m.Type = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CloneDirectory", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DataContentType", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -8545,13 +9745,13 @@ func (m *GitArtifact) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.CloneDirectory = string(dAtA[iNdEx:postIndex]) + m.DataContentType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Creds", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Subject", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8561,31 +9761,27 @@ func (m *GitArtifact) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift 
if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Creds == nil { - m.Creds = &GitCreds{} - } - if err := m.Creds.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Subject = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SSHKeySecret", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8612,16 +9808,63 @@ func (m *GitArtifact) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SSHKeySecret == nil { - m.SSHKeySecret = &v1.SecretKeySelector{} - } - if err := m.SSHKeySecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Time.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventDependency) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventDependency: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventDependency: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FilePath", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -8649,11 +9892,11 @@ func (m *GitArtifact) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.FilePath = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Branch", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EventSourceName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -8681,11 +9924,11 @@ func (m *GitArtifact) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Branch = string(dAtA[iNdEx:postIndex]) + m.EventSourceName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 7: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field EventName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -8713,13 +9956,13 @@ func (m *GitArtifact) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Tag = 
string(dAtA[iNdEx:postIndex]) + m.EventName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 8: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8729,27 +9972,31 @@ func (m *GitArtifact) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Ref = string(dAtA[iNdEx:postIndex]) + if m.Filters == nil { + m.Filters = &EventDependencyFilter{} + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 9: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Transform", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8776,16 +10023,16 @@ func (m *GitArtifact) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Remote == nil { - m.Remote = &GitRemoteConfig{} + if m.Transform == nil { + m.Transform = &EventDependencyTransformer{} } - if err := m.Remote.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Transform.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 10: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedSSHKeyPath", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FiltersLogicalOperator", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -8813,7 +10060,7 @@ func (m *GitArtifact) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DeprecatedSSHKeyPath = string(dAtA[iNdEx:postIndex]) + m.FiltersLogicalOperator = LogicalOperator(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -8836,7 +10083,7 @@ func (m *GitArtifact) Unmarshal(dAtA []byte) error { } return nil } -func (m *GitCreds) Unmarshal(dAtA []byte) error { +func (m *EventDependencyFilter) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -8859,15 +10106,15 @@ func (m *GitCreds) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GitCreds: wiretype end group for non-group") + return fmt.Errorf("proto: EventDependencyFilter: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GitCreds: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EventDependencyFilter: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8894,16 +10141,16 @@ func (m *GitCreds) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Username == nil { - m.Username = 
&v1.SecretKeySelector{} + if m.Time == nil { + m.Time = &TimeFilter{} } - if err := m.Username.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Time.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -8930,66 +10177,84 @@ func (m *GitCreds) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Password == nil { - m.Password = &v1.SecretKeySelector{} + if m.Context == nil { + m.Context = &EventContext{} } - if err := m.Password.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GitRemoteConfig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + m.Data = append(m.Data, DataFilter{}) + if err := m.Data[len(m.Data)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - if iNdEx >= l { + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exprs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.Exprs = append(m.Exprs, ExprFilter{}) + if err := m.Exprs[len(m.Exprs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GitRemoteConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GitRemoteConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DataLogicalOperator", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -9017,11 +10282,11 @@ func (m 
*GitRemoteConfig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.DataLogicalOperator = LogicalOperator(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field URLS", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ExprLogicalOperator", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -9049,18 +10314,50 @@ func (m *GitRemoteConfig) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.URLS = append(m.URLS, string(dAtA[iNdEx:postIndex])) + m.ExprLogicalOperator = LogicalOperator(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Script", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Script = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy @@ -9072,7 +10369,7 @@ func (m *GitRemoteConfig) Unmarshal(dAtA []byte) error { } return nil } -func (m *HTTPTrigger) Unmarshal(dAtA []byte) error { +func (m *EventDependencyTransformer) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9095,15 +10392,15 @@ func (m *HTTPTrigger) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HTTPTrigger: wiretype end group for non-group") + return fmt.Errorf("proto: EventDependencyTransformer: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPTrigger: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EventDependencyTransformer: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field JQ", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -9131,13 +10428,13 @@ func (m *HTTPTrigger) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.URL = string(dAtA[iNdEx:postIndex]) + m.JQ = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Script", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9147,65 +10444,77 @@ func (m *HTTPTrigger) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Payload = append(m.Payload, TriggerParameter{}) - if err := m.Payload[len(m.Payload)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Script = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.TLS == nil { - m.TLS = &common.TLSConfig{} + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExprFilter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if err := m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if iNdEx >= l { + return io.ErrUnexpectedEOF } - iNdEx = postIndex - case 4: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExprFilter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExprFilter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Method", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Expr", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -9233,11 +10542,11 @@ func (m *HTTPTrigger) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Method = string(dAtA[iNdEx:postIndex]) + m.Expr = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9264,71 +10573,66 @@ func (m *HTTPTrigger) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Parameters = append(m.Parameters, TriggerParameter{}) - if err := m.Parameters[len(m.Parameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Fields = append(m.Fields, PayloadField{}) + if err := m.Fields[len(m.Fields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Timeout", wireType) - } - m.Timeout = 0 
- for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Timeout |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BasicAuth", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.BasicAuth == nil { - m.BasicAuth = &common.BasicAuth{} + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FileArtifact) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if err := m.BasicAuth.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if iNdEx >= l { + return io.ErrUnexpectedEOF } - iNdEx = postIndex - case 8: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FileArtifact: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FileArtifact: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Headers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9338,44 +10642,1166 @@ func (m *HTTPTrigger) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Headers == nil { - m.Headers = make(map[string]string) + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } 
+ return nil +} +func (m *GitArtifact) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GitArtifact: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GitArtifact: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.URL = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CloneDirectory", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CloneDirectory = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Creds", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Creds == nil { + m.Creds = &GitCreds{} + } + if err := m.Creds.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SSHKeySecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SSHKeySecret == nil { + m.SSHKeySecret = &v1.SecretKeySelector{} + } + if err := m.SSHKeySecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + 
iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FilePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FilePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Branch", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Branch = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tag = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ref = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Remote == nil { + m.Remote = &GitRemoteConfig{} + } + if err := m.Remote.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field InsecureIgnoreHostKey", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.InsecureIgnoreHostKey = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GitCreds) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GitCreds: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GitCreds: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Username == nil { + m.Username = &v1.SecretKeySelector{} + } + if err := m.Username.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Password == nil { + m.Password = &v1.SecretKeySelector{} + } + if err := m.Password.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GitRemoteConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire 
|= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GitRemoteConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GitRemoteConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URLS", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.URLS = append(m.URLS, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPTrigger) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPTrigger: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPTrigger: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.URL = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Payload", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Payload = append(m.Payload, TriggerParameter{}) + if err := m.Payload[len(m.Payload)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TLS == nil { + m.TLS = &common.TLSConfig{} + } + if err := m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Method", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Method = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Parameters = append(m.Parameters, TriggerParameter{}) + if err := m.Parameters[len(m.Parameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timeout", wireType) + } + m.Timeout = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timeout |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BasicAuth", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b 
< 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BasicAuth == nil { + m.BasicAuth = &common.BasicAuth{} + } + if err := m.BasicAuth.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Headers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Headers == nil { + m.Headers = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Headers[mapkey] = mapvalue + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecureHeaders", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } 
+ if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecureHeaders = append(m.SecureHeaders, &common.SecureHeader{}) + if err := m.SecureHeaders[len(m.SecureHeaders)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *K8SResourcePolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: K8SResourcePolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: K8SResourcePolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) if fieldNum == 1 { var stringLenmapkey uint64 for shift := uint(0); ; shift += 7 { @@ -9449,11 +11875,716 @@ func (m *HTTPTrigger) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.Headers[mapkey] = mapvalue + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Backoff", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Backoff == nil { + m.Backoff = &common.Backoff{} + } + if err := m.Backoff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field 
ErrorOnBackoffTimeout", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ErrorOnBackoffTimeout = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KafkaTrigger) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KafkaTrigger: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KafkaTrigger: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.URL = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Topic", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Topic = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) + } + m.Partition = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Partition |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx 
+ msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Parameters = append(m.Parameters, TriggerParameter{}) + if err := m.Parameters[len(m.Parameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiredAcks", wireType) + } + m.RequiredAcks = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RequiredAcks |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Compress", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Compress = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FlushFrequency", wireType) + } + m.FlushFrequency = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FlushFrequency |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TLS == nil { + m.TLS = &common.TLSConfig{} + } + if err := m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Payload = append(m.Payload, TriggerParameter{}) + if err := m.Payload[len(m.Payload)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PartitioningKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > 
l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.PartitioningKey = &s + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SASL", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SASL == nil { + m.SASL = &common.SASLConfig{} + } + if err := m.SASL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SchemaRegistry", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SchemaRegistry == nil { + m.SchemaRegistry = &common.SchemaRegistryConfig{} + } + if err := m.SchemaRegistry.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LogTrigger) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LogTrigger: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LogTrigger: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IntervalSeconds", wireType) + } + m.IntervalSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.IntervalSeconds |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NATSTrigger) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NATSTrigger: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NATSTrigger: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.URL = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subject", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subject = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Payload = append(m.Payload, TriggerParameter{}) + if err := m.Payload[len(m.Payload)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 9: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecureHeaders", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) } var msglen int 
for shift := uint(0); ; shift += 7 { @@ -9480,8 +12611,44 @@ func (m *HTTPTrigger) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.SecureHeaders = append(m.SecureHeaders, &common.SecureHeader{}) - if err := m.SecureHeaders[len(m.SecureHeaders)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Parameters = append(m.Parameters, TriggerParameter{}) + if err := m.Parameters[len(m.Parameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TLS == nil { + m.TLS = &common.TLSConfig{} + } + if err := m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -9506,7 +12673,7 @@ func (m *HTTPTrigger) Unmarshal(dAtA []byte) error { } return nil } -func (m *K8SResourcePolicy) Unmarshal(dAtA []byte) error { +func (m *OpenWhiskTrigger) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9529,15 +12696,179 @@ func (m *K8SResourcePolicy) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: K8SResourcePolicy: wiretype end group for non-group") + return fmt.Errorf("proto: OpenWhiskTrigger: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: K8SResourcePolicy: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: OpenWhiskTrigger: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthToken", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuthToken == nil { + m.AuthToken = &v1.SecretKeySelector{} + } + if err := m.AuthToken.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ActionName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ActionName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9564,107 +12895,14 @@ func (m *K8SResourcePolicy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - 
var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + m.Payload = append(m.Payload, TriggerParameter{}) + if err := m.Payload[len(m.Payload)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.Labels[mapkey] = mapvalue iNdEx = postIndex - case 2: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backoff", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9691,33 +12929,11 @@ func (m *K8SResourcePolicy) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Backoff == nil { - m.Backoff = &common.Backoff{} - } - if err := m.Backoff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Parameters = append(m.Parameters, TriggerParameter{}) + if err := m.Parameters[len(m.Parameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ErrorOnBackoffTimeout", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ErrorOnBackoffTimeout = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -9739,7 +12955,7 @@ func (m *K8SResourcePolicy) Unmarshal(dAtA []byte) error { } return nil } -func (m *KafkaTrigger) Unmarshal(dAtA []byte) error { +func (m *PayloadField) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9762,15 +12978,15 @@ func (m *KafkaTrigger) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: KafkaTrigger: wiretype end group for non-group") + return fmt.Errorf("proto: PayloadField: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: KafkaTrigger: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PayloadField: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -9798,11 +13014,11 @@ func (m *KafkaTrigger) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.URL = string(dAtA[iNdEx:postIndex]) + m.Path = 
string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Topic", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -9830,124 +13046,63 @@ func (m *KafkaTrigger) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Topic = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) - } - m.Partition = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Partition |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + msglen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.Parameters = append(m.Parameters, TriggerParameter{}) - if err := m.Parameters[len(m.Parameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RequiredAcks", wireType) - } - m.RequiredAcks = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RequiredAcks |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Compress", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PulsarTrigger) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - m.Compress = bool(v != 0) - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FlushFrequency", wireType) + if iNdEx >= l { + return io.ErrUnexpectedEOF } - m.FlushFrequency = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.FlushFrequency |= int32(b&0x7F) << shift - if b < 0x80 { - break - } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break } - case 8: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + 
return fmt.Errorf("proto: PulsarTrigger: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PulsarTrigger: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9957,33 +13112,29 @@ func (m *KafkaTrigger) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if m.TLS == nil { - m.TLS = &common.TLSConfig{} - } - if err := m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.URL = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 9: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Topic", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9993,31 +13144,29 @@ func (m *KafkaTrigger) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Payload = append(m.Payload, TriggerParameter{}) - if err := m.Payload[len(m.Payload)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Topic = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 10: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PartitioningKey", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10027,29 +13176,31 @@ func (m *KafkaTrigger) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.PartitioningKey = string(dAtA[iNdEx:postIndex]) + m.Parameters = append(m.Parameters, TriggerParameter{}) + if err := m.Parameters[len(m.Parameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 11: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { 
return ErrIntOverflowGenerated @@ -10059,27 +13210,29 @@ func (m *KafkaTrigger) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Version = string(dAtA[iNdEx:postIndex]) + m.Payload = append(m.Payload, TriggerParameter{}) + if err := m.Payload[len(m.Payload)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 12: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SASL", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TLSTrustCertsSecret", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10106,68 +13259,18 @@ func (m *KafkaTrigger) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.SASL == nil { - m.SASL = &common.SASLConfig{} + if m.TLSTrustCertsSecret == nil { + m.TLSTrustCertsSecret = &v1.SecretKeySelector{} } - if err := m.SASL.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.TLSTrustCertsSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LogTrigger) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LogTrigger: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LogTrigger: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 6: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IntervalSeconds", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TLSAllowInsecureConnection", wireType) } - m.IntervalSeconds = 0 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10177,66 +13280,37 @@ func (m *LogTrigger) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.IntervalSeconds |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NATSTrigger) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + m.TLSAllowInsecureConnection = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TLSValidateHostname", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NATSTrigger: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NATSTrigger: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.TLSValidateHostname = bool(v != 0) + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10246,29 +13320,33 @@ func (m *NATSTrigger) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.URL = string(dAtA[iNdEx:postIndex]) + if m.TLS == nil { + m.TLS = &common.TLSConfig{} + } + if err := m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 9: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subject", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AuthTokenSecret", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10278,27 +13356,31 @@ func (m *NATSTrigger) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Subject = string(dAtA[iNdEx:postIndex]) + if m.AuthTokenSecret == nil { + m.AuthTokenSecret = &v1.SecretKeySelector{} + } + if err := m.AuthTokenSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 10: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionBackoff", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10325,14 +13407,16 @@ func (m *NATSTrigger) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Payload = append(m.Payload, TriggerParameter{}) - if err := m.Payload[len(m.Payload)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ConnectionBackoff == nil { + m.ConnectionBackoff = 
&common.Backoff{} + } + if err := m.ConnectionBackoff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: + case 11: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AuthAthenzParams", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10359,14 +13443,107 @@ func (m *NATSTrigger) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Parameters = append(m.Parameters, TriggerParameter{}) - if err := m.Parameters[len(m.Parameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if m.AuthAthenzParams == nil { + m.AuthAthenzParams = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } + m.AuthAthenzParams[mapkey] = mapvalue iNdEx = postIndex - case 5: + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AuthAthenzSecret", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10393,10 +13570,10 @@ func (m *NATSTrigger) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.TLS == nil { - m.TLS = &common.TLSConfig{} + if m.AuthAthenzSecret == nil { + m.AuthAthenzSecret = &v1.SecretKeySelector{} } - if err := m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := 
m.AuthAthenzSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -10421,7 +13598,7 @@ func (m *NATSTrigger) Unmarshal(dAtA []byte) error { } return nil } -func (m *OpenWhiskTrigger) Unmarshal(dAtA []byte) error { +func (m *RateLimit) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10444,15 +13621,15 @@ func (m *OpenWhiskTrigger) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: OpenWhiskTrigger: wiretype end group for non-group") + return fmt.Errorf("proto: RateLimit: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: OpenWhiskTrigger: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RateLimit: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -10480,13 +13657,13 @@ func (m *OpenWhiskTrigger) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Host = string(dAtA[iNdEx:postIndex]) + m.Unit = RateLimiteUnit(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestsPerUnit", wireType) } - var stringLen uint64 + m.RequestsPerUnit = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10496,59 +13673,64 @@ func (m *OpenWhiskTrigger) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.RequestsPerUnit |= int32(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.Version = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Sensor) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Sensor: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Sensor: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AuthToken", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10575,48 +13757,13 @@ func (m *OpenWhiskTrigger) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.AuthToken == nil { - m.AuthToken = &v1.SecretKeySelector{} - } - if err := m.AuthToken.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ActionName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ActionName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10643,14 +13790,13 @@ func (m *OpenWhiskTrigger) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Payload = append(m.Payload, TriggerParameter{}) - if err := m.Payload[len(m.Payload)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 7: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10677,8 +13823,7 @@ func (m *OpenWhiskTrigger) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Parameters = append(m.Parameters, TriggerParameter{}) - if err := m.Parameters[len(m.Parameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -10703,7 +13848,7 @@ func (m *OpenWhiskTrigger) Unmarshal(dAtA []byte) error { } return nil } -func (m *PayloadField) Unmarshal(dAtA []byte) error { +func (m *SensorList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10726,17 +13871,17 @@ func (m *PayloadField) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PayloadField: wiretype end group for non-group") + return fmt.Errorf("proto: SensorList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PayloadField: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SensorList: illegal tag 
%d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10746,29 +13891,30 @@ func (m *PayloadField) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -10778,23 +13924,25 @@ func (m *PayloadField) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Items = append(m.Items, Sensor{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -10817,7 +13965,7 @@ func (m *PayloadField) Unmarshal(dAtA []byte) error { } return nil } -func (m *Sensor) Unmarshal(dAtA []byte) error { +func (m *SensorSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10840,15 +13988,15 @@ func (m *Sensor) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Sensor: wiretype end group for non-group") + return fmt.Errorf("proto: SensorSpec: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Sensor: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SensorSpec: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Dependencies", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10875,13 +14023,14 @@ func (m *Sensor) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Dependencies = append(m.Dependencies, EventDependency{}) + if err := m.Dependencies[len(m.Dependencies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Triggers", wireType) 
} var msglen int for shift := uint(0); ; shift += 7 { @@ -10908,13 +14057,142 @@ func (m *Sensor) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Triggers = append(m.Triggers, Trigger{}) + if err := m.Triggers[len(m.Triggers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Template == nil { + m.Template = &Template{} + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ErrorOnFailedRound", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ErrorOnFailedRound = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EventBusName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EventBusName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoggingFields", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -10931,19 +14209,113 @@ func (m *Sensor) Unmarshal(dAtA []byte) error { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LoggingFields == nil { + m.LoggingFields = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.LoggingFields[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -10966,7 +14338,7 @@ func (m *Sensor) Unmarshal(dAtA []byte) error { } return nil } -func (m *SensorList) Unmarshal(dAtA []byte) error { +func (m *SensorStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -10989,48 +14361,15 @@ func (m *SensorList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SensorList: wiretype end group for non-group") + return fmt.Errorf("proto: SensorStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SensorList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SensorStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx 
>= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -11057,8 +14396,7 @@ func (m *SensorList) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, Sensor{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -11083,7 +14421,7 @@ func (m *SensorList) Unmarshal(dAtA []byte) error { } return nil } -func (m *SensorSpec) Unmarshal(dAtA []byte) error { +func (m *SlackSender) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11106,173 +14444,15 @@ func (m *SensorSpec) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SensorSpec: wiretype end group for non-group") + return fmt.Errorf("proto: SlackSender: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SensorSpec: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SlackSender: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Dependencies", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Dependencies = append(m.Dependencies, EventDependency{}) - if err := m.Dependencies[len(m.Dependencies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Triggers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Triggers = append(m.Triggers, Trigger{}) - if err := m.Triggers[len(m.Triggers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Template == nil { - m.Template = &Template{} - } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DependencyGroups", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DependencyGroups = append(m.DependencyGroups, DependencyGroup{}) - if err := m.DependencyGroups[len(m.DependencyGroups)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ErrorOnFailedRound", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ErrorOnFailedRound = bool(v != 0) - case 6: + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EventBusName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -11300,11 +14480,11 @@ func (m *SensorSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.EventBusName = string(dAtA[iNdEx:postIndex]) + m.Username = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 7: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedCircuit", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Icon", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -11332,28 +14512,8 @@ func (m *SensorSpec) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.DeprecatedCircuit = string(dAtA[iNdEx:postIndex]) + m.Icon = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Replicas = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -11375,7 +14535,7 @@ func (m *SensorSpec) Unmarshal(dAtA []byte) error { } return nil } -func (m *SensorStatus) Unmarshal(dAtA []byte) error { +func (m *SlackThread) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -11398,17 +14558,17 @@ func (m *SensorStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return 
fmt.Errorf("proto: SensorStatus: wiretype end group for non-group") + return fmt.Errorf("proto: SlackThread: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SensorStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SlackThread: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MessageAggregationKey", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -11418,25 +14578,44 @@ func (m *SensorStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.MessageAggregationKey = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BroadcastMessageToChannel", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.BroadcastMessageToChannel = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -11621,6 +14800,136 @@ func (m *SlackTrigger) Unmarshal(dAtA []byte) error { } m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attachments", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attachments = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Blocks", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Blocks = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Thread", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Thread.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sender", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Sender.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -11672,39 +14981,6 @@ func (m *StandardK8STrigger) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GroupVersionResource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.GroupVersionResource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) } @@ -11740,7 +15016,7 @@ func (m *StandardK8STrigger) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 3: + case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Operation", wireType) } @@ -11772,7 +15048,7 @@ func (m *StandardK8STrigger) Unmarshal(dAtA []byte) error { } m.Operation = KubernetesResourceOperation(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) } @@ -11806,7 +15082,7 @@ func (m *StandardK8STrigger) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 5: + case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field PatchStrategy", wireType) } @@ -11838,7 +15114,7 @@ func (m *StandardK8STrigger) Unmarshal(dAtA []byte) error { } m.PatchStrategy = k8s_io_apimachinery_pkg_types.PatchType(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: + case 5: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field LiveObject", wireType) } @@ -12763,7 +16039,43 @@ func (m *Trigger) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RetryStrategy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field RetryStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RetryStrategy == nil { + m.RetryStrategy = &common.Backoff{} + } + if err := m.RetryStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RateLimit", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -12790,13 +16102,33 @@ func (m *Trigger) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.RetryStrategy == nil { - m.RetryStrategy = &common.Backoff{} + if m.RateLimit == nil { + m.RateLimit = &RateLimit{} } - if err := m.RetryStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.RateLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AtLeastOnce", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AtLeastOnce = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -13190,6 +16522,26 @@ func (m *TriggerParameterSource) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.Value = &s iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UseRawData", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.UseRawData = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -13333,120 +16685,6 @@ func (m *TriggerPolicy) Unmarshal(dAtA []byte) error { } return nil } -func (m *TriggerSwitch) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TriggerSwitch: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TriggerSwitch: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Any", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if 
postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Any = append(m.Any, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field All", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.All = append(m.All, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *TriggerTemplate) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -13866,7 +17104,7 @@ func (m *TriggerTemplate) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedSwitch", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13893,16 +17131,16 @@ func (m *TriggerTemplate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.DeprecatedSwitch == nil { - m.DeprecatedSwitch = &TriggerSwitch{} + if m.Log == nil { + m.Log = &LogTrigger{} } - if err := m.DeprecatedSwitch.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Log.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 13: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AzureEventHubs", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13929,16 +17167,16 @@ func (m *TriggerTemplate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Log == nil { - m.Log = &LogTrigger{} + if m.AzureEventHubs == nil { + m.AzureEventHubs = &AzureEventHubsTrigger{} } - if err := m.Log.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.AzureEventHubs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 14: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AzureEventHubs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Pulsar", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -13965,10 +17203,116 @@ func (m *TriggerTemplate) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.AzureEventHubs == nil { - m.AzureEventHubs = &AzureEventHubsTrigger{} + if m.Pulsar == nil { + m.Pulsar = &PulsarTrigger{} } - if err := m.AzureEventHubs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Pulsar.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConditionsReset", wireType) + 
} + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConditionsReset = append(m.ConditionsReset, ConditionsResetCriteria{}) + if err := m.ConditionsReset[len(m.ConditionsReset)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AzureServiceBus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AzureServiceBus == nil { + m.AzureServiceBus = &AzureServiceBusTrigger{} + } + if err := m.AzureServiceBus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Email", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Email == nil { + m.Email = &EmailTrigger{} + } + if err := m.Email.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex diff --git a/pkg/apis/sensor/v1alpha1/generated.proto b/pkg/apis/sensor/v1alpha1/generated.proto index b15550e8fa..b44ad7044a 100644 --- a/pkg/apis/sensor/v1alpha1/generated.proto +++ b/pkg/apis/sensor/v1alpha1/generated.proto @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,7 +16,7 @@ limitations under the License. // This file was autogenerated by go-to-protobuf. Do not edit it manually! -syntax = 'proto2'; +syntax = "proto2"; package github.com.argoproj.argo_events.pkg.apis.sensor.v1alpha1; @@ -27,17 +27,19 @@ import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". -option go_package = "v1alpha1"; +option go_package = "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"; // AWSLambdaTrigger refers to specification of the trigger to invoke an AWS Lambda function message AWSLambdaTrigger { // FunctionName refers to the name of the function to invoke. 
optional string functionName = 1; - // AccessKey refers K8 secret containing aws access key + // AccessKey refers K8s secret containing aws access key + // +optional optional k8s.io.api.core.v1.SecretKeySelector accessKey = 2; - // SecretKey refers K8 secret containing aws secret key + // SecretKey refers K8s secret containing aws secret key + // +optional optional k8s.io.api.core.v1.SecretKeySelector secretKey = 3; // Region is AWS region @@ -65,11 +67,15 @@ message AWSLambdaTrigger { // has permission to invoke the function. // +optional optional string invocationType = 7; + + // RoleARN is the Amazon Resource Name (ARN) of the role to assume. + // +optional + optional string roleARN = 8; } // ArgoWorkflowTrigger is the trigger for the Argo Workflow message ArgoWorkflowTrigger { - // Source of the K8 resource file(s) + // Source of the K8s resource file(s) optional ArtifactLocation source = 1; // Operation refers to the type of operation performed on the argo workflow resource. @@ -80,8 +86,8 @@ message ArgoWorkflowTrigger { // Parameters is the list of parameters to pass to resolved Argo Workflow object repeated TriggerParameter parameters = 3; - // The unambiguous kind of this object - used in order to retrieve the appropriate kubernetes api client for this resource - optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource groupVersionResource = 4; + // Args is the list of arguments to pass to the argo CLI + repeated string args = 4; } // ArtifactLocation describes the source location for an external artifact @@ -131,6 +137,45 @@ message AzureEventHubsTrigger { repeated TriggerParameter parameters = 6; } +message AzureServiceBusTrigger { + // ConnectionString is the connection string for the Azure Service Bus + optional k8s.io.api.core.v1.SecretKeySelector connectionString = 1; + + // QueueName is the name of the Azure Service Bus Queue + optional string queueName = 2; + + // TopicName is the name of the Azure Service Bus Topic + optional string topicName = 3; + + // SubscriptionName is the name of the Azure Service Bus Topic Subscription + optional string subscriptionName = 4; + + // TLS configuration for the service bus client + // +optional + optional github.com.argoproj.argo_events.pkg.apis.common.TLSConfig tls = 5; + + // Payload is the list of key-value extracted from an event payload to construct the request payload. + repeated TriggerParameter payload = 6; + + // Parameters is the list of key-value extracted from event's payload that are applied to + // the trigger resource. + // +optional + repeated TriggerParameter parameters = 7; +} + +message ConditionsResetByTime { + // Cron is a cron-like expression. For reference, see: https://en.wikipedia.org/wiki/Cron + optional string cron = 1; + + // +optional + optional string timezone = 2; +} + +message ConditionsResetCriteria { + // Schedule is a cron-like expression. For reference, see: https://en.wikipedia.org/wiki/Cron + optional ConditionsResetByTime byTime = 1; +} + // CustomTrigger refers to the specification of the custom trigger. message CustomTrigger { // ServerURL is the url of the gRPC server that executes custom trigger @@ -153,10 +198,6 @@ message CustomTrigger { // Payload is the list of key-value extracted from an event payload to construct the request payload. repeated TriggerParameter payload = 7; - - // DeprecatedCertFilePath is path to the cert file within sensor for secure connection between sensor and custom trigger gRPC server. 
- // Deprecated: will be removed in v1.5, use CertSecret instead - optional string certFilePath = 8; } // DataFilter describes constraints and filters for event data @@ -191,13 +232,44 @@ message DataFilter { optional string template = 5; } -// DependencyGroup is the group of dependencies -message DependencyGroup { - // Name of the group - optional string name = 1; +// EmailTrigger refers to the specification of the email notification trigger. +message EmailTrigger { + // Parameters is the list of key-value extracted from event's payload that are applied to + // the trigger resource. + // +optional + repeated TriggerParameter parameters = 1; + + // Username refers to the username used to connect to the smtp server. + // +optional + optional string username = 2; - // Dependencies of events - repeated string dependencies = 2; + // SMTPPassword refers to the Kubernetes secret that holds the smtp password used to connect to the smtp server. + // +optional + optional k8s.io.api.core.v1.SecretKeySelector smtpPassword = 3; + + // Host refers to the smtp host url to which email is sent. + optional string host = 4; + + // Port refers to the smtp server port to which email is sent. + // Defaults to 0. + // +optional + optional int32 port = 5; + + // To refers to the email addresses to which the emails are sent. + // +optional + repeated string to = 6; + + // From refers to the address from which the email is sent. + // +optional + optional string from = 7; + + // Subject refers to the subject line of the email. + // +optional + optional string subject = 8; + + // Body refers to the body/content of the email. + // +optional + optional string body = 9; } // Event represents the cloudevent received from an event source. @@ -246,6 +318,14 @@ message EventDependency { // Filters and rules governing toleration of success and constraints on the context and data of an event optional EventDependencyFilter filters = 4; + + // Transform transforms the event data + optional EventDependencyTransformer transform = 5; + + // FiltersLogicalOperator defines how different filters are evaluated together. + // Available values: and (&&), or (||) + // Is optional and if left blank treated as and (&&). + optional string filtersLogicalOperator = 6; } // EventDependencyFilter defines filters and constraints for a event. @@ -261,6 +341,30 @@ message EventDependencyFilter { // Exprs contains the list of expressions evaluated against the event payload. repeated ExprFilter exprs = 4; + + // DataLogicalOperator defines how multiple Data filters (if defined) are evaluated together. + // Available values: and (&&), or (||) + // Is optional and if left blank treated as and (&&). + optional string dataLogicalOperator = 5; + + // ExprLogicalOperator defines how multiple Exprs filters (if defined) are evaluated together. + // Available values: and (&&), or (||) + // Is optional and if left blank treated as and (&&). + optional string exprLogicalOperator = 6; + + // Script refers to a Lua script evaluated to determine the validity of an event.
+ optional string script = 7; +} + +// EventDependencyTransformer transforms the event +message EventDependencyTransformer { + // JQ holds the jq command applied for transformation + // +optional + optional string jq = 1; + + // Script refers to a Lua script used to transform the event + // +optional + optional string script = 2; } message ExprFilter { @@ -313,11 +417,9 @@ message GitArtifact { // +optional optional GitRemoteConfig remote = 9; - // DeprecatedSSHKeyPath is path to your ssh key path. Use this if you don't want to provide username and password. - // ssh key path must be mounted in sensor pod. - // Deprecated: will be removed in v1.5, use SSHKeySecret instead. + // Whether to ignore host key // +optional - optional string sshKeyPath = 10; + optional bool insecureIgnoreHostKey = 10; } // GitCreds contain reference to git username and password @@ -398,7 +500,8 @@ message KafkaTrigger { // More info at https://kafka.apache.org/documentation/#intro_topics optional string topic = 2; - // Partition to write data to. + // +optional + // DEPRECATED optional int32 partition = 3; // Parameters is the list of parameters that is applied to resolved Kafka trigger object. @@ -428,7 +531,6 @@ message KafkaTrigger { repeated TriggerParameter payload = 9; // The partitioning key for the messages put on the Kafka topic. - // Defaults to broker url. // +optional. optional string partitioningKey = 10; @@ -439,6 +541,10 @@ message KafkaTrigger { // SASL configuration for the kafka client // +optional optional github.com.argoproj.argo_events.pkg.apis.common.SASLConfig sasl = 12; + + // Schema Registry configuration to produce messages with avro format + // +optional + optional github.com.argoproj.argo_events.pkg.apis.common.SchemaRegistryConfig schemaRegistry = 13; } message LogTrigger { @@ -507,6 +613,66 @@ message PayloadField { optional string name = 2; } +// PulsarTrigger refers to the specification of the Pulsar trigger. +message PulsarTrigger { + // Configure the service URL for the Pulsar service. + // +required + optional string url = 1; + + // Name of the topic. + // See https://pulsar.apache.org/docs/en/concepts-messaging/ + optional string topic = 2; + + // Parameters is the list of parameters that is applied to resolved Pulsar trigger object. + repeated TriggerParameter parameters = 3; + + // Payload is the list of key-value extracted from an event payload to construct the request payload. + repeated TriggerParameter payload = 4; + + // Trusted TLS certificate secret. + // +optional + optional k8s.io.api.core.v1.SecretKeySelector tlsTrustCertsSecret = 5; + + // Whether the Pulsar client accepts untrusted TLS certificate from broker. + // +optional + optional bool tlsAllowInsecureConnection = 6; + + // Whether the Pulsar client verifies the validity of the host name from broker. + // +optional + optional bool tlsValidateHostname = 7; + + // TLS configuration for the pulsar client. + // +optional + optional github.com.argoproj.argo_events.pkg.apis.common.TLSConfig tls = 8; + + // Authentication token for the pulsar client. + // Either token or athenz can be set to use auth. + // +optional + optional k8s.io.api.core.v1.SecretKeySelector authTokenSecret = 9; + + // Backoff holds parameters applied to connection. + // +optional + optional github.com.argoproj.argo_events.pkg.apis.common.Backoff connectionBackoff = 10; + + // Authentication athenz parameters for the pulsar client.
+ // Refer https://github.com/apache/pulsar-client-go/blob/master/pulsar/auth/athenz.go + // Either token or athenz can be set to use auth. + // +optional + map<string, string> authAthenzParams = 11; + + // Authentication athenz privateKey secret for the pulsar client. + // AuthAthenzSecret must be set if AuthAthenzParams is used. + // +optional + optional k8s.io.api.core.v1.SecretKeySelector authAthenzSecret = 12; +} + +message RateLimit { + // Defaults to Second + optional string unit = 1; + + optional int32 requestsPerUnit = 2; +} + // Sensor is the definition of a sensor resource // +genclient // +genclient:noStatus @@ -543,22 +709,23 @@ message SensorSpec { // +optional optional Template template = 3; - // DependencyGroups is a list of the groups of events. - repeated DependencyGroup dependencyGroups = 4; - // ErrorOnFailedRound if set to true, marks sensor state as `error` if the previous trigger round fails. // Once sensor state is set to `error`, no further triggers will be processed. - optional bool errorOnFailedRound = 5; + optional bool errorOnFailedRound = 4; // EventBusName references to a EventBus name. By default the value is "default" - optional string eventBusName = 6; - - // Circuit is a boolean expression of dependency groups - // Deprecated: will be removed in v1.5, use Switch in triggers instead. - optional string circuit = 7; + optional string eventBusName = 5; // Replicas is the sensor deployment replicas - optional int32 replicas = 8; + optional int32 replicas = 6; + + // RevisionHistoryLimit specifies how many old deployment revisions to retain + // +optional + optional int32 revisionHistoryLimit = 7; + + // LoggingFields add additional key-value pairs when logging happens + // +optional + map<string, string> loggingFields = 8; } // SensorStatus contains information about the status of a sensor. @@ -566,6 +733,26 @@ message SensorStatus { optional github.com.argoproj.argo_events.pkg.apis.common.Status status = 1; } +message SlackSender { + // Username is the Slack application's username + // +optional + optional string username = 1; + + // Icon is the Slack application's icon, e.g. :robot_face: or https://example.com/image.png + // +optional + optional string icon = 2; +} + +message SlackThread { + // MessageAggregationKey allows to aggregate the messages to a thread by some key. + // +optional + optional string messageAggregationKey = 1; + + // BroadcastMessageToChannel allows to also broadcast the message from the thread to the channel + // +optional + optional bool broadcastMessageToChannel = 2; +} + // SlackTrigger refers to the specification of the slack notification trigger. message SlackTrigger { // Parameters is the list of key-value extracted from event's payload that are applied to @@ -576,30 +763,43 @@ message SlackTrigger { // SlackToken refers to the Kubernetes secret that holds the slack token required to send messages. optional k8s.io.api.core.v1.SecretKeySelector slackToken = 2; - // Channel refers to which Slack channel to send slack message. + // Channel refers to which Slack channel to send Slack message. // +optional optional string channel = 3; // Message refers to the message to send to the Slack channel. // +optional optional string message = 4; + + // Attachments is a JSON format string that represents an array of Slack attachments according to the attachments API: https://api.slack.com/reference/messaging/attachments .
+ // +optional + optional string attachments = 5; + + // Blocks is a JSON format string that represents an array of Slack blocks according to the blocks API: https://api.slack.com/reference/block-kit/blocks . + // +optional + optional string blocks = 6; + + // Thread refers to additional options for sending messages to a Slack thread. + // +optional + optional SlackThread thread = 7; + + // Sender refers to additional configuration of the Slack application that sends the message. + // +optional + optional SlackSender sender = 8; } // StandardK8STrigger is the standard Kubernetes resource trigger message StandardK8STrigger { - // The unambiguous kind of this object - used in order to retrieve the appropriate kubernetes api client for this resource - optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource groupVersionResource = 1; - - // Source of the K8 resource file(s) - optional ArtifactLocation source = 2; + // Source of the K8s resource file(s) + optional ArtifactLocation source = 1; // Operation refers to the type of operation performed on the k8s resource. // Default value is Create. // +optional - optional string operation = 3; + optional string operation = 2; // Parameters is the list of parameters that is applied to resolved K8s trigger object. - repeated TriggerParameter parameters = 4; + repeated TriggerParameter parameters = 3; // PatchStrategy controls the K8s object patching strategy when the trigger operation is specified as patch. // possible values: @@ -609,7 +809,7 @@ message StandardK8STrigger { // "application/apply-patch+yaml". // Defaults to "application/merge-patch+json" // +optional - optional string patchStrategy = 5; + optional string patchStrategy = 4; // LiveObject specifies whether the resource should be directly fetched from K8s instead // of being marshaled from the resource artifact. If set to true, the resource artifact @@ -618,7 +818,7 @@ message StandardK8STrigger { // data. // Only valid for operation type `update` // +optional - optional bool liveObject = 6; + optional bool liveObject = 5; } // StatusPolicy refers to the policy used to check the state of the trigger using response status @@ -727,6 +927,17 @@ message Trigger { // Retry strategy, defaults to no retry // +optional optional github.com.argoproj.argo_events.pkg.apis.common.Backoff retryStrategy = 4; + + // Rate limit, default unit is Second + // +optional + optional RateLimit rateLimit = 5; + + // AtLeastOnce determines the trigger execution semantics. + // Defaults to false. Trigger execution will use at-most-once semantics. + // If set to true, Trigger execution will switch to at-least-once semantics. + // +kubebuilder:default=false + // +optional + optional bool atLeastOnce = 6; } // TriggerParameter indicates a passed parameter to a service template @@ -779,6 +990,13 @@ message TriggerParameterSource { // This is only used if the DataKey is invalid. // If the DataKey is invalid and this is not defined, this param source will produce an error. optional string value = 6; + + // UseRawData indicates if the value in an event at data key should be used without converting to string. + // When true, a number, boolean, json or string parameter may be extracted. When the field is unspecified, or explicitly + // false, the behavior is to turn the extracted field into a string. (e.g. 
when set to true, the parameter + 123 will resolve to the numerical type, but when false, or not provided, the string "123" will be resolved) + // +optional + optional bool useRawData = 7; } // TriggerPolicy dictates the policy for the trigger retries @@ -790,17 +1008,6 @@ message TriggerPolicy { optional StatusPolicy status = 2; } -// TriggerSwitch describes condition which must be satisfied in order to execute a trigger. -// Depending upon condition type, status of dependency groups is used to evaluate the result. -// Deprecated: will be removed in v1.5 -message TriggerSwitch { - // Any acts as a OR operator between dependencies - repeated string any = 1; - - // All acts as a AND operator between dependencies - repeated string all = 2; -} - // TriggerTemplate is the template that describes trigger specification. message TriggerTemplate { // Name is a unique name of the action to take. @@ -849,16 +1056,27 @@ message TriggerTemplate { // Log refers to the trigger designed to invoke log the event. // +optional - optional LogTrigger log = 13; + optional LogTrigger log = 12; - // DeprecatedSwitch is the condition to execute the trigger. - // Deprecated: will be removed in v1.5, use conditions instead + // AzureEventHubs refers to the trigger designed to send an event to an Azure Event Hub. // +optional - optional TriggerSwitch switch = 12; + optional AzureEventHubsTrigger azureEventHubs = 13; - // AzureEventHubs refers to the trigger send an event to an Azure Event Hub. + // Pulsar refers to the trigger designed to place messages on Pulsar topic. + // +optional + optional PulsarTrigger pulsar = 14; + + // Criteria to reset the conditions + // +optional + repeated ConditionsResetCriteria conditionsReset = 15; + + // AzureServiceBus refers to the trigger designed to place messages on Azure Service Bus + // +optional + optional AzureServiceBusTrigger azureServiceBus = 16; + + // Email refers to the trigger designed to send an email notification // +optional - optional AzureEventHubsTrigger azureEventHubs = 14; + optional EmailTrigger email = 17; } // URLArtifact contains information about an artifact at an http endpoint. diff --git a/pkg/apis/sensor/v1alpha1/openapi_generated.go b/pkg/apis/sensor/v1alpha1/openapi_generated.go index c146d55e25..87c02b804f 100644 --- a/pkg/apis/sensor/v1alpha1/openapi_generated.go +++ b/pkg/apis/sensor/v1alpha1/openapi_generated.go @@ -1,7 +1,8 @@ +//go:build !ignore_autogenerated // +build !ignore_autogenerated /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -23,51 +24,58 @@ limitations under the License.
package v1alpha1 import ( - spec "github.com/go-openapi/spec" common "k8s.io/kube-openapi/pkg/common" + spec "k8s.io/kube-openapi/pkg/validation/spec" ) func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { return map[string]common.OpenAPIDefinition{ - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.AWSLambdaTrigger": schema_pkg_apis_sensor_v1alpha1_AWSLambdaTrigger(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.ArgoWorkflowTrigger": schema_pkg_apis_sensor_v1alpha1_ArgoWorkflowTrigger(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.ArtifactLocation": schema_pkg_apis_sensor_v1alpha1_ArtifactLocation(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.AzureEventHubsTrigger": schema_pkg_apis_sensor_v1alpha1_AzureEventHubsTrigger(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.CustomTrigger": schema_pkg_apis_sensor_v1alpha1_CustomTrigger(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.DataFilter": schema_pkg_apis_sensor_v1alpha1_DataFilter(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.DependencyGroup": schema_pkg_apis_sensor_v1alpha1_DependencyGroup(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.Event": schema_pkg_apis_sensor_v1alpha1_Event(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.EventContext": schema_pkg_apis_sensor_v1alpha1_EventContext(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.EventDependency": schema_pkg_apis_sensor_v1alpha1_EventDependency(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.EventDependencyFilter": schema_pkg_apis_sensor_v1alpha1_EventDependencyFilter(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.ExprFilter": schema_pkg_apis_sensor_v1alpha1_ExprFilter(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.FileArtifact": schema_pkg_apis_sensor_v1alpha1_FileArtifact(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.GitArtifact": schema_pkg_apis_sensor_v1alpha1_GitArtifact(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.GitCreds": schema_pkg_apis_sensor_v1alpha1_GitCreds(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.GitRemoteConfig": schema_pkg_apis_sensor_v1alpha1_GitRemoteConfig(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.HTTPTrigger": schema_pkg_apis_sensor_v1alpha1_HTTPTrigger(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.K8SResourcePolicy": schema_pkg_apis_sensor_v1alpha1_K8SResourcePolicy(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.KafkaTrigger": schema_pkg_apis_sensor_v1alpha1_KafkaTrigger(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.LogTrigger": schema_pkg_apis_sensor_v1alpha1_LogTrigger(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.NATSTrigger": schema_pkg_apis_sensor_v1alpha1_NATSTrigger(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.OpenWhiskTrigger": schema_pkg_apis_sensor_v1alpha1_OpenWhiskTrigger(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.PayloadField": schema_pkg_apis_sensor_v1alpha1_PayloadField(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.Sensor": schema_pkg_apis_sensor_v1alpha1_Sensor(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.SensorList": schema_pkg_apis_sensor_v1alpha1_SensorList(ref), - 
"github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.SensorSpec": schema_pkg_apis_sensor_v1alpha1_SensorSpec(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.SensorStatus": schema_pkg_apis_sensor_v1alpha1_SensorStatus(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.SlackTrigger": schema_pkg_apis_sensor_v1alpha1_SlackTrigger(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.StandardK8STrigger": schema_pkg_apis_sensor_v1alpha1_StandardK8STrigger(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.StatusPolicy": schema_pkg_apis_sensor_v1alpha1_StatusPolicy(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.Template": schema_pkg_apis_sensor_v1alpha1_Template(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TimeFilter": schema_pkg_apis_sensor_v1alpha1_TimeFilter(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.Trigger": schema_pkg_apis_sensor_v1alpha1_Trigger(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter": schema_pkg_apis_sensor_v1alpha1_TriggerParameter(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameterSource": schema_pkg_apis_sensor_v1alpha1_TriggerParameterSource(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerPolicy": schema_pkg_apis_sensor_v1alpha1_TriggerPolicy(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerSwitch": schema_pkg_apis_sensor_v1alpha1_TriggerSwitch(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerTemplate": schema_pkg_apis_sensor_v1alpha1_TriggerTemplate(ref), - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.URLArtifact": schema_pkg_apis_sensor_v1alpha1_URLArtifact(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.AWSLambdaTrigger": schema_pkg_apis_sensor_v1alpha1_AWSLambdaTrigger(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.ArgoWorkflowTrigger": schema_pkg_apis_sensor_v1alpha1_ArgoWorkflowTrigger(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.ArtifactLocation": schema_pkg_apis_sensor_v1alpha1_ArtifactLocation(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.AzureEventHubsTrigger": schema_pkg_apis_sensor_v1alpha1_AzureEventHubsTrigger(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.AzureServiceBusTrigger": schema_pkg_apis_sensor_v1alpha1_AzureServiceBusTrigger(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.ConditionsResetByTime": schema_pkg_apis_sensor_v1alpha1_ConditionsResetByTime(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.ConditionsResetCriteria": schema_pkg_apis_sensor_v1alpha1_ConditionsResetCriteria(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.CustomTrigger": schema_pkg_apis_sensor_v1alpha1_CustomTrigger(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.DataFilter": schema_pkg_apis_sensor_v1alpha1_DataFilter(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.EmailTrigger": schema_pkg_apis_sensor_v1alpha1_EmailTrigger(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.Event": schema_pkg_apis_sensor_v1alpha1_Event(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.EventContext": schema_pkg_apis_sensor_v1alpha1_EventContext(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.EventDependency": schema_pkg_apis_sensor_v1alpha1_EventDependency(ref), + 
"github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.EventDependencyFilter": schema_pkg_apis_sensor_v1alpha1_EventDependencyFilter(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.EventDependencyTransformer": schema_pkg_apis_sensor_v1alpha1_EventDependencyTransformer(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.ExprFilter": schema_pkg_apis_sensor_v1alpha1_ExprFilter(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.FileArtifact": schema_pkg_apis_sensor_v1alpha1_FileArtifact(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.GitArtifact": schema_pkg_apis_sensor_v1alpha1_GitArtifact(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.GitCreds": schema_pkg_apis_sensor_v1alpha1_GitCreds(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.GitRemoteConfig": schema_pkg_apis_sensor_v1alpha1_GitRemoteConfig(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.HTTPTrigger": schema_pkg_apis_sensor_v1alpha1_HTTPTrigger(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.K8SResourcePolicy": schema_pkg_apis_sensor_v1alpha1_K8SResourcePolicy(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.KafkaTrigger": schema_pkg_apis_sensor_v1alpha1_KafkaTrigger(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.LogTrigger": schema_pkg_apis_sensor_v1alpha1_LogTrigger(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.NATSTrigger": schema_pkg_apis_sensor_v1alpha1_NATSTrigger(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.OpenWhiskTrigger": schema_pkg_apis_sensor_v1alpha1_OpenWhiskTrigger(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.PayloadField": schema_pkg_apis_sensor_v1alpha1_PayloadField(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.PulsarTrigger": schema_pkg_apis_sensor_v1alpha1_PulsarTrigger(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.RateLimit": schema_pkg_apis_sensor_v1alpha1_RateLimit(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.Sensor": schema_pkg_apis_sensor_v1alpha1_Sensor(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.SensorList": schema_pkg_apis_sensor_v1alpha1_SensorList(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.SensorSpec": schema_pkg_apis_sensor_v1alpha1_SensorSpec(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.SensorStatus": schema_pkg_apis_sensor_v1alpha1_SensorStatus(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.SlackSender": schema_pkg_apis_sensor_v1alpha1_SlackSender(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.SlackThread": schema_pkg_apis_sensor_v1alpha1_SlackThread(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.SlackTrigger": schema_pkg_apis_sensor_v1alpha1_SlackTrigger(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.StandardK8STrigger": schema_pkg_apis_sensor_v1alpha1_StandardK8STrigger(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.StatusPolicy": schema_pkg_apis_sensor_v1alpha1_StatusPolicy(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.Template": schema_pkg_apis_sensor_v1alpha1_Template(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TimeFilter": schema_pkg_apis_sensor_v1alpha1_TimeFilter(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.Trigger": schema_pkg_apis_sensor_v1alpha1_Trigger(ref), + 
"github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter": schema_pkg_apis_sensor_v1alpha1_TriggerParameter(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameterSource": schema_pkg_apis_sensor_v1alpha1_TriggerParameterSource(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerPolicy": schema_pkg_apis_sensor_v1alpha1_TriggerPolicy(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerTemplate": schema_pkg_apis_sensor_v1alpha1_TriggerTemplate(ref), + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.URLArtifact": schema_pkg_apis_sensor_v1alpha1_URLArtifact(ref), } } @@ -81,25 +89,27 @@ func schema_pkg_apis_sensor_v1alpha1_AWSLambdaTrigger(ref common.ReferenceCallba "functionName": { SchemaProps: spec.SchemaProps{ Description: "FunctionName refers to the name of the function to invoke.", + Default: "", Type: []string{"string"}, Format: "", }, }, "accessKey": { SchemaProps: spec.SchemaProps{ - Description: "AccessKey refers K8 secret containing aws access key", + Description: "AccessKey refers K8s secret containing aws access key", Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), }, }, "secretKey": { SchemaProps: spec.SchemaProps{ - Description: "SecretKey refers K8 secret containing aws secret key", + Description: "SecretKey refers K8s secret containing aws secret key", Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), }, }, "region": { SchemaProps: spec.SchemaProps{ Description: "Region is AWS region", + Default: "", Type: []string{"string"}, Format: "", }, @@ -111,7 +121,8 @@ func schema_pkg_apis_sensor_v1alpha1_AWSLambdaTrigger(ref common.ReferenceCallba Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), }, }, }, @@ -124,7 +135,8 @@ func schema_pkg_apis_sensor_v1alpha1_AWSLambdaTrigger(ref common.ReferenceCallba Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), }, }, }, @@ -137,6 +149,13 @@ func schema_pkg_apis_sensor_v1alpha1_AWSLambdaTrigger(ref common.ReferenceCallba Format: "", }, }, + "roleARN": { + SchemaProps: spec.SchemaProps{ + Description: "RoleARN is the Amazon Resource Name (ARN) of the role to assume.", + Type: []string{"string"}, + Format: "", + }, + }, }, Required: []string{"functionName", "region", "payload"}, }, @@ -155,7 +174,7 @@ func schema_pkg_apis_sensor_v1alpha1_ArgoWorkflowTrigger(ref common.ReferenceCal Properties: map[string]spec.Schema{ "source": { SchemaProps: spec.SchemaProps{ - Description: "Source of the K8 resource file(s)", + Description: "Source of the K8s resource file(s)", Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.ArtifactLocation"), }, }, @@ -173,32 +192,29 @@ func schema_pkg_apis_sensor_v1alpha1_ArgoWorkflowTrigger(ref common.ReferenceCal Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), }, }, }, }, }, - 
"group": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "version": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "resource": { + "args": { SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Description: "Args is the list of arguments to pass to the argo CLI", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, }, }, }, - Required: []string{"group", "version", "resource"}, }, }, Dependencies: []string{ @@ -274,6 +290,7 @@ func schema_pkg_apis_sensor_v1alpha1_AzureEventHubsTrigger(ref common.ReferenceC "fqdn": { SchemaProps: spec.SchemaProps{ Description: "FQDN refers to the namespace dns of Azure Event Hubs to be used i.e. .servicebus.windows.net", + Default: "", Type: []string{"string"}, Format: "", }, @@ -281,6 +298,7 @@ func schema_pkg_apis_sensor_v1alpha1_AzureEventHubsTrigger(ref common.ReferenceC "hubName": { SchemaProps: spec.SchemaProps{ Description: "HubName refers to the Azure Event Hub to send events to", + Default: "", Type: []string{"string"}, Format: "", }, @@ -304,7 +322,8 @@ func schema_pkg_apis_sensor_v1alpha1_AzureEventHubsTrigger(ref common.ReferenceC Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), }, }, }, @@ -317,7 +336,8 @@ func schema_pkg_apis_sensor_v1alpha1_AzureEventHubsTrigger(ref common.ReferenceC Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), }, }, }, @@ -332,6 +352,130 @@ func schema_pkg_apis_sensor_v1alpha1_AzureEventHubsTrigger(ref common.ReferenceC } } +func schema_pkg_apis_sensor_v1alpha1_AzureServiceBusTrigger(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "connectionString": { + SchemaProps: spec.SchemaProps{ + Description: "ConnectionString is the connection string for the Azure Service Bus", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "queueName": { + SchemaProps: spec.SchemaProps{ + Description: "QueueName is the name of the Azure Service Bus Queue", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "topicName": { + SchemaProps: spec.SchemaProps{ + Description: "TopicName is the name of the Azure Service Bus Topic", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "subscriptionName": { + SchemaProps: spec.SchemaProps{ + Description: "SubscriptionName is the name of the Azure Service Bus Topic Subscription", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "tls": { + SchemaProps: spec.SchemaProps{ + Description: "TLS configuration for the service bus client", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/common.TLSConfig"), + }, + }, + "payload": { + SchemaProps: spec.SchemaProps{ + Description: "Payload is the list of key-value extracted from an event payload to construct the 
request payload.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), + }, + }, + }, + }, + }, + "parameters": { + SchemaProps: spec.SchemaProps{ + Description: "Parameters is the list of key-value extracted from event's payload that are applied to the trigger resource.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), + }, + }, + }, + }, + }, + }, + Required: []string{"queueName", "topicName", "subscriptionName", "payload"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-events/pkg/apis/common.TLSConfig", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter", "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_sensor_v1alpha1_ConditionsResetByTime(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "cron": { + SchemaProps: spec.SchemaProps{ + Description: "Cron is a cron-like expression. For reference, see: https://en.wikipedia.org/wiki/Cron", + Type: []string{"string"}, + Format: "", + }, + }, + "timezone": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_sensor_v1alpha1_ConditionsResetCriteria(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "byTime": { + SchemaProps: spec.SchemaProps{ + Description: "Schedule is a cron-like expression. 
For reference, see: https://en.wikipedia.org/wiki/Cron", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.ConditionsResetByTime"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.ConditionsResetByTime"}, + } +} + func schema_pkg_apis_sensor_v1alpha1_CustomTrigger(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -342,6 +486,7 @@ func schema_pkg_apis_sensor_v1alpha1_CustomTrigger(ref common.ReferenceCallback) "serverURL": { SchemaProps: spec.SchemaProps{ Description: "ServerURL is the url of the gRPC server that executes custom trigger", + Default: "", Type: []string{"string"}, Format: "", }, @@ -349,6 +494,7 @@ func schema_pkg_apis_sensor_v1alpha1_CustomTrigger(ref common.ReferenceCallback) "secure": { SchemaProps: spec.SchemaProps{ Description: "Secure refers to type of the connection between sensor to custom trigger gRPC", + Default: false, Type: []string{"boolean"}, Format: "", }, @@ -374,8 +520,9 @@ func schema_pkg_apis_sensor_v1alpha1_CustomTrigger(ref common.ReferenceCallback) Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, @@ -388,7 +535,8 @@ func schema_pkg_apis_sensor_v1alpha1_CustomTrigger(ref common.ReferenceCallback) Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), }, }, }, @@ -401,19 +549,13 @@ func schema_pkg_apis_sensor_v1alpha1_CustomTrigger(ref common.ReferenceCallback) Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), }, }, }, }, }, - "certFilePath": { - SchemaProps: spec.SchemaProps{ - Description: "DeprecatedCertFilePath is path to the cert file within sensor for secure connection between sensor and custom trigger gRPC server. Deprecated: will be removed in v1.5, use CertSecret instead", - Type: []string{"string"}, - Format: "", - }, - }, }, Required: []string{"serverURL", "secure", "spec", "payload"}, }, @@ -433,6 +575,7 @@ func schema_pkg_apis_sensor_v1alpha1_DataFilter(ref common.ReferenceCallback) co "path": { SchemaProps: spec.SchemaProps{ Description: "Path is the JSONPath of the event's (JSON decoded) data key Path is a series of keys separated by a dot. A key may contain wildcard characters '*' and '?'. To access an array value use the index as the key. The dot and wildcard characters can be escaped with '\\'. 
See https://github.com/tidwall/gjson#path-syntax for more information on how to use this.", + Default: "", Type: []string{"string"}, Format: "", }, @@ -440,6 +583,7 @@ func schema_pkg_apis_sensor_v1alpha1_DataFilter(ref common.ReferenceCallback) co "type": { SchemaProps: spec.SchemaProps{ Description: "Type contains the JSON type of the data", + Default: "", Type: []string{"string"}, Format: "", }, @@ -451,8 +595,9 @@ func schema_pkg_apis_sensor_v1alpha1_DataFilter(ref common.ReferenceCallback) co Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, @@ -479,38 +624,95 @@ func schema_pkg_apis_sensor_v1alpha1_DataFilter(ref common.ReferenceCallback) co } } -func schema_pkg_apis_sensor_v1alpha1_DependencyGroup(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_pkg_apis_sensor_v1alpha1_EmailTrigger(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "DependencyGroup is the group of dependencies", + Description: "EmailTrigger refers to the specification of the email notification trigger.", Type: []string{"object"}, Properties: map[string]spec.Schema{ - "name": { + "parameters": { + SchemaProps: spec.SchemaProps{ + Description: "Parameters is the list of key-value extracted from event's payload that are applied to the trigger resource.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), + }, + }, + }, + }, + }, + "username": { SchemaProps: spec.SchemaProps{ - Description: "Name of the group", + Description: "Username refers to the username used to connect to the smtp server.", Type: []string{"string"}, Format: "", }, }, - "dependencies": { + "smtpPassword": { + SchemaProps: spec.SchemaProps{ + Description: "SMTPPassword refers to the Kubernetes secret that holds the smtp password used to connect to smtp server.", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "host": { + SchemaProps: spec.SchemaProps{ + Description: "Host refers to the smtp host url to which email is sent.", + Type: []string{"string"}, + Format: "", + }, + }, + "port": { + SchemaProps: spec.SchemaProps{ + Description: "Port refers to the smtp server port to which email is sent. 
Defaults to 0.", + Type: []string{"integer"}, + Format: "int32", + }, + }, + "to": { SchemaProps: spec.SchemaProps{ - Description: "Dependencies of events", + Description: "To refers to the email addresses to which the emails are sent.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, }, }, + "from": { + SchemaProps: spec.SchemaProps{ + Description: "From refers to the address from which the email is sent.", + Type: []string{"string"}, + Format: "", + }, + }, + "subject": { + SchemaProps: spec.SchemaProps{ + Description: "Subject refers to the subject line for the email sent.", + Type: []string{"string"}, + Format: "", + }, + }, + "body": { + SchemaProps: spec.SchemaProps{ + Description: "Body refers to the body/content of the email sent.", + Type: []string{"string"}, + Format: "", + }, + }, }, - Required: []string{"name", "dependencies"}, }, }, + Dependencies: []string{ + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter", "k8s.io/api/core/v1.SecretKeySelector"}, } } @@ -551,6 +753,7 @@ func schema_pkg_apis_sensor_v1alpha1_EventContext(ref common.ReferenceCallback) "id": { SchemaProps: spec.SchemaProps{ Description: "ID of the event; must be non-empty and unique within the scope of the producer.", + Default: "", Type: []string{"string"}, Format: "", }, @@ -558,6 +761,7 @@ func schema_pkg_apis_sensor_v1alpha1_EventContext(ref common.ReferenceCallback) "source": { SchemaProps: spec.SchemaProps{ Description: "Source - A URI describing the event producer.", + Default: "", Type: []string{"string"}, Format: "", }, @@ -565,6 +769,7 @@ func schema_pkg_apis_sensor_v1alpha1_EventContext(ref common.ReferenceCallback) "specversion": { SchemaProps: spec.SchemaProps{ Description: "SpecVersion - The version of the CloudEvents specification used by the event.", + Default: "", Type: []string{"string"}, Format: "", }, @@ -572,6 +777,7 @@ func schema_pkg_apis_sensor_v1alpha1_EventContext(ref common.ReferenceCallback) "type": { SchemaProps: spec.SchemaProps{ Description: "Type - The type of the occurrence which has happened.", + Default: "", Type: []string{"string"}, Format: "", }, @@ -579,6 +785,7 @@ func schema_pkg_apis_sensor_v1alpha1_EventContext(ref common.ReferenceCallback) "datacontenttype": { SchemaProps: spec.SchemaProps{ Description: "DataContentType - A MIME (RFC2046) string describing the media type of `data`.", + Default: "", Type: []string{"string"}, Format: "", }, @@ -586,6 +793,7 @@ func schema_pkg_apis_sensor_v1alpha1_EventContext(ref common.ReferenceCallback) "subject": { SchemaProps: spec.SchemaProps{ Description: "Subject - The subject of the event in the context of the event producer", + Default: "", Type: []string{"string"}, Format: "", }, @@ -615,6 +823,7 @@ func schema_pkg_apis_sensor_v1alpha1_EventDependency(ref common.ReferenceCallbac "name": { SchemaProps: spec.SchemaProps{ Description: "Name is a unique name of this dependency", + Default: "", Type: []string{"string"}, Format: "", }, @@ -622,6 +831,7 @@ func schema_pkg_apis_sensor_v1alpha1_EventDependency(ref common.ReferenceCallbac "eventSourceName": { SchemaProps: spec.SchemaProps{ Description: "EventSourceName is the name of EventSource that Sensor depends on", + Default: "", Type: []string{"string"}, Format: "", }, @@ -629,6 +839,7 @@ func schema_pkg_apis_sensor_v1alpha1_EventDependency(ref common.ReferenceCallbac "eventName": { 
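// NOTE (illustrative annotation, not part of the generated schema): the EventDependency
// properties built here correspond one-to-one to a Sensor's spec.dependencies entries in
// YAML. A minimal, hypothetical sketch using the new transform and filtersLogicalOperator
// fields; all values below are made up for illustration:
//
//	dependencies:
//	  - name: example-dep              # hypothetical dependency name
//	    eventSourceName: webhook       # hypothetical EventSource name
//	    eventName: example             # hypothetical event name
//	    transform:
//	      jq: ".body.message"          # jq command applied to the event data
//	    filtersLogicalOperator: "or"   # filters combined with ||; blank defaults to and (&&)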
SchemaProps: spec.SchemaProps{ Description: "EventName is the name of the event", + Default: "", Type: []string{"string"}, Format: "", }, @@ -639,12 +850,25 @@ func schema_pkg_apis_sensor_v1alpha1_EventDependency(ref common.ReferenceCallbac Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.EventDependencyFilter"), }, }, + "transform": { + SchemaProps: spec.SchemaProps{ + Description: "Transform transforms the event data", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.EventDependencyTransformer"), + }, + }, + "filtersLogicalOperator": { + SchemaProps: spec.SchemaProps{ + Description: "FiltersLogicalOperator defines how different filters are evaluated together. Available values: and (&&), or (||) Is optional and if left blank treated as and (&&).", + Type: []string{"string"}, + Format: "", + }, + }, }, Required: []string{"name", "eventSourceName", "eventName"}, }, }, Dependencies: []string{ - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.EventDependencyFilter"}, + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.EventDependencyFilter", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.EventDependencyTransformer"}, } } @@ -674,7 +898,8 @@ func schema_pkg_apis_sensor_v1alpha1_EventDependencyFilter(ref common.ReferenceC Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.DataFilter"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.DataFilter"), }, }, }, @@ -687,12 +912,34 @@ func schema_pkg_apis_sensor_v1alpha1_EventDependencyFilter(ref common.ReferenceC Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.ExprFilter"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.ExprFilter"), }, }, }, }, }, + "dataLogicalOperator": { + SchemaProps: spec.SchemaProps{ + Description: "DataLogicalOperator defines how multiple Data filters (if defined) are evaluated together. Available values: and (&&), or (||) Is optional and if left blank treated as and (&&).", + Type: []string{"string"}, + Format: "", + }, + }, + "exprLogicalOperator": { + SchemaProps: spec.SchemaProps{ + Description: "ExprLogicalOperator defines how multiple Exprs filters (if defined) are evaluated together. 
Available values: and (&&), or (||) Is optional and if left blank treated as and (&&).", + Type: []string{"string"}, + Format: "", + }, + }, + "script": { + SchemaProps: spec.SchemaProps{ + Description: "Script refers to a Lua script evaluated to determine the validity of an event.", + Type: []string{"string"}, + Format: "", + }, + }, }, }, }, @@ -701,6 +948,33 @@ func schema_pkg_apis_sensor_v1alpha1_EventDependencyFilter(ref common.ReferenceC } } +func schema_pkg_apis_sensor_v1alpha1_EventDependencyTransformer(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "EventDependencyTransformer transforms the event", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "jq": { + SchemaProps: spec.SchemaProps{ + Description: "JQ holds the jq command applied for transformation", + Type: []string{"string"}, + Format: "", + }, + }, + "script": { + SchemaProps: spec.SchemaProps{ + Description: "Script refers to a Lua script used to transform the event", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + func schema_pkg_apis_sensor_v1alpha1_ExprFilter(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -710,6 +984,7 @@ func schema_pkg_apis_sensor_v1alpha1_ExprFilter(ref common.ReferenceCallback) co "expr": { SchemaProps: spec.SchemaProps{ Description: "Expr refers to the expression that determines the outcome of the filter.", + Default: "", Type: []string{"string"}, Format: "", }, @@ -721,7 +996,8 @@ func schema_pkg_apis_sensor_v1alpha1_ExprFilter(ref common.ReferenceCallback) co Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.PayloadField"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.PayloadField"), }, }, }, @@ -765,6 +1041,7 @@ func schema_pkg_apis_sensor_v1alpha1_GitArtifact(ref common.ReferenceCallback) c "url": { SchemaProps: spec.SchemaProps{ Description: "Git URL", + Default: "", Type: []string{"string"}, Format: "", }, @@ -772,6 +1049,7 @@ func schema_pkg_apis_sensor_v1alpha1_GitArtifact(ref common.ReferenceCallback) c "cloneDirectory": { SchemaProps: spec.SchemaProps{ Description: "Directory to clone the repository. We clone complete directory because GitArtifact is not limited to any specific Git service providers. Hence we don't use any specific git provider client.", + Default: "", Type: []string{"string"}, Format: "", }, @@ -791,6 +1069,7 @@ func schema_pkg_apis_sensor_v1alpha1_GitArtifact(ref common.ReferenceCallback) c "filePath": { SchemaProps: spec.SchemaProps{ Description: "Path to file that contains trigger resource definition", + Default: "", Type: []string{"string"}, Format: "", }, @@ -822,10 +1101,10 @@ func schema_pkg_apis_sensor_v1alpha1_GitArtifact(ref common.ReferenceCallback) c Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.GitRemoteConfig"), }, }, - "sshKeyPath": { + "insecureIgnoreHostKey": { SchemaProps: spec.SchemaProps{ - Description: "DeprecatedSSHKeyPath is path to your ssh key path. Use this if you don't want to provide username and password. ssh key path must be mounted in sensor pod. 
Deprecated: will be removed in v1.5, use SSHKeySecret instead.", - Type: []string{"string"}, + Description: "Whether to ignore host key", + Type: []string{"boolean"}, Format: "", }, }, @@ -873,6 +1152,7 @@ func schema_pkg_apis_sensor_v1alpha1_GitRemoteConfig(ref common.ReferenceCallbac "name": { SchemaProps: spec.SchemaProps{ Description: "Name of the remote to fetch from.", + Default: "", Type: []string{"string"}, Format: "", }, @@ -884,8 +1164,9 @@ func schema_pkg_apis_sensor_v1alpha1_GitRemoteConfig(ref common.ReferenceCallbac Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, @@ -908,6 +1189,7 @@ func schema_pkg_apis_sensor_v1alpha1_HTTPTrigger(ref common.ReferenceCallback) c "url": { SchemaProps: spec.SchemaProps{ Description: "URL refers to the URL to send HTTP request to.", + Default: "", Type: []string{"string"}, Format: "", }, @@ -918,7 +1200,8 @@ func schema_pkg_apis_sensor_v1alpha1_HTTPTrigger(ref common.ReferenceCallback) c Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), }, }, }, @@ -944,7 +1227,8 @@ func schema_pkg_apis_sensor_v1alpha1_HTTPTrigger(ref common.ReferenceCallback) c Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), }, }, }, @@ -971,8 +1255,9 @@ func schema_pkg_apis_sensor_v1alpha1_HTTPTrigger(ref common.ReferenceCallback) c Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, @@ -1015,8 +1300,9 @@ func schema_pkg_apis_sensor_v1alpha1_K8SResourcePolicy(ref common.ReferenceCallb Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, @@ -1031,6 +1317,7 @@ func schema_pkg_apis_sensor_v1alpha1_K8SResourcePolicy(ref common.ReferenceCallb "errorOnBackoffTimeout": { SchemaProps: spec.SchemaProps{ Description: "ErrorOnBackoffTimeout determines whether sensor should transition to error state if the trigger policy is unable to determine the state of the resource", + Default: false, Type: []string{"boolean"}, Format: "", }, @@ -1054,6 +1341,7 @@ func schema_pkg_apis_sensor_v1alpha1_KafkaTrigger(ref common.ReferenceCallback) "url": { SchemaProps: spec.SchemaProps{ Description: "URL of the Kafka broker, multiple URLs separated by comma.", + Default: "", Type: []string{"string"}, Format: "", }, @@ -1061,13 +1349,15 @@ func schema_pkg_apis_sensor_v1alpha1_KafkaTrigger(ref common.ReferenceCallback) "topic": { SchemaProps: spec.SchemaProps{ Description: "Name of the topic. 
More info at https://kafka.apache.org/documentation/#intro_topics", + Default: "", Type: []string{"string"}, Format: "", }, }, "partition": { SchemaProps: spec.SchemaProps{ - Description: "Partition to write data to.", + Description: "DEPRECATED", + Default: 0, Type: []string{"integer"}, Format: "int32", }, @@ -1079,7 +1369,8 @@ func schema_pkg_apis_sensor_v1alpha1_KafkaTrigger(ref common.ReferenceCallback) Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), }, }, }, @@ -1119,7 +1410,8 @@ func schema_pkg_apis_sensor_v1alpha1_KafkaTrigger(ref common.ReferenceCallback) Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), }, }, }, @@ -1127,7 +1419,7 @@ func schema_pkg_apis_sensor_v1alpha1_KafkaTrigger(ref common.ReferenceCallback) }, "partitioningKey": { SchemaProps: spec.SchemaProps{ - Description: "The partitioning key for the messages put on the Kafka topic. Defaults to broker url.", + Description: "The partitioning key for the messages put on the Kafka topic.", Type: []string{"string"}, Format: "", }, @@ -1145,12 +1437,18 @@ func schema_pkg_apis_sensor_v1alpha1_KafkaTrigger(ref common.ReferenceCallback) Ref: ref("github.com/argoproj/argo-events/pkg/apis/common.SASLConfig"), }, }, + "schemaRegistry": { + SchemaProps: spec.SchemaProps{ + Description: "Schema Registry configuration to produce messages in Avro format", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/common.SchemaRegistryConfig"), + }, + }, }, - Required: []string{"url", "topic", "partition", "payload"}, + Required: []string{"url", "topic", "payload"}, }, }, Dependencies: []string{ - "github.com/argoproj/argo-events/pkg/apis/common.SASLConfig", "github.com/argoproj/argo-events/pkg/apis/common.TLSConfig", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"}, + "github.com/argoproj/argo-events/pkg/apis/common.SASLConfig", "github.com/argoproj/argo-events/pkg/apis/common.SchemaRegistryConfig", "github.com/argoproj/argo-events/pkg/apis/common.TLSConfig", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"}, } } @@ -1183,6 +1481,7 @@ func schema_pkg_apis_sensor_v1alpha1_NATSTrigger(ref common.ReferenceCallback) c "url": { SchemaProps: spec.SchemaProps{ Description: "URL of the NATS cluster.", + Default: "", Type: []string{"string"}, Format: "", }, @@ -1190,6 +1489,7 @@ func schema_pkg_apis_sensor_v1alpha1_NATSTrigger(ref common.ReferenceCallback) c "subject": { SchemaProps: spec.SchemaProps{ Description: "Name of the subject to put message on.", + Default: "", Type: []string{"string"}, Format: "", }, @@ -1200,7 +1500,8 @@ func schema_pkg_apis_sensor_v1alpha1_NATSTrigger(ref common.ReferenceCallback) c Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), }, }, }, @@ -1212,7 +1513,8 @@ func schema_pkg_apis_sensor_v1alpha1_NATSTrigger(ref common.ReferenceCallback) 
c Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), }, }, }, @@ -1243,6 +1545,7 @@ func schema_pkg_apis_sensor_v1alpha1_OpenWhiskTrigger(ref common.ReferenceCallba "host": { SchemaProps: spec.SchemaProps{ Description: "Host URL of the OpenWhisk.", + Default: "", Type: []string{"string"}, Format: "", }, @@ -1270,6 +1573,7 @@ func schema_pkg_apis_sensor_v1alpha1_OpenWhiskTrigger(ref common.ReferenceCallba "actionName": { SchemaProps: spec.SchemaProps{ Description: "Name of the action/function.", + Default: "", Type: []string{"string"}, Format: "", }, @@ -1281,7 +1585,8 @@ func schema_pkg_apis_sensor_v1alpha1_OpenWhiskTrigger(ref common.ReferenceCallba Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), }, }, }, @@ -1294,7 +1599,8 @@ func schema_pkg_apis_sensor_v1alpha1_OpenWhiskTrigger(ref common.ReferenceCallba Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), }, }, }, @@ -1319,6 +1625,7 @@ func schema_pkg_apis_sensor_v1alpha1_PayloadField(ref common.ReferenceCallback) "path": { SchemaProps: spec.SchemaProps{ Description: "Path is the JSONPath of the event's (JSON decoded) data key Path is a series of keys separated by a dot. A key may contain wildcard characters '*' and '?'. To access an array value use the index as the key. The dot and wildcard characters can be escaped with '\\'. See https://github.com/tidwall/gjson#path-syntax for more information on how to use this.", + Default: "", Type: []string{"string"}, Format: "", }, @@ -1326,6 +1633,7 @@ func schema_pkg_apis_sensor_v1alpha1_PayloadField(ref common.ReferenceCallback) "name": { SchemaProps: spec.SchemaProps{ Description: "Name acts as key that holds the value at the path.", + Default: "", Type: []string{"string"}, Format: "", }, @@ -1337,6 +1645,151 @@ func schema_pkg_apis_sensor_v1alpha1_PayloadField(ref common.ReferenceCallback) } } +func schema_pkg_apis_sensor_v1alpha1_PulsarTrigger(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "PulsarTrigger refers to the specification of the Pulsar trigger.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "url": { + SchemaProps: spec.SchemaProps{ + Description: "Configure the service URL for the Pulsar service.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "topic": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the topic. 
See https://pulsar.apache.org/docs/en/concepts-messaging/", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "parameters": { + SchemaProps: spec.SchemaProps{ + Description: "Parameters is the list of parameters that is applied to resolved Pulsar trigger object.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), + }, + }, + }, + }, + }, + "payload": { + SchemaProps: spec.SchemaProps{ + Description: "Payload is the list of key-value extracted from an event payload to construct the request payload.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), + }, + }, + }, + }, + }, + "tlsTrustCertsSecret": { + SchemaProps: spec.SchemaProps{ + Description: "Trusted TLS certificate secret.", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "tlsAllowInsecureConnection": { + SchemaProps: spec.SchemaProps{ + Description: "Whether the Pulsar client accepts untrusted TLS certificate from broker.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "tlsValidateHostname": { + SchemaProps: spec.SchemaProps{ + Description: "Whether the Pulsar client verifies the validity of the host name from broker.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "tls": { + SchemaProps: spec.SchemaProps{ + Description: "TLS configuration for the pulsar client.", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/common.TLSConfig"), + }, + }, + "authTokenSecret": { + SchemaProps: spec.SchemaProps{ + Description: "Authentication token for the pulsar client. Either token or athenz can be set to use auth.", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + "connectionBackoff": { + SchemaProps: spec.SchemaProps{ + Description: "Backoff holds parameters applied to connection.", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/common.Backoff"), + }, + }, + "authAthenzParams": { + SchemaProps: spec.SchemaProps{ + Description: "Authentication athenz parameters for the pulsar client. Refer https://github.com/apache/pulsar-client-go/blob/master/pulsar/auth/athenz.go Either token or athenz can be set to use auth.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "authAthenzSecret": { + SchemaProps: spec.SchemaProps{ + Description: "Authentication athenz privateKey secret for the pulsar client. 
AuthAthenzSecret must be set if AuthAthenzParams is used.", + Ref: ref("k8s.io/api/core/v1.SecretKeySelector"), + }, + }, + }, + Required: []string{"url", "topic", "payload"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-events/pkg/apis/common.Backoff", "github.com/argoproj/argo-events/pkg/apis/common.TLSConfig", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter", "k8s.io/api/core/v1.SecretKeySelector"}, + } +} + +func schema_pkg_apis_sensor_v1alpha1_RateLimit(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "unit": { + SchemaProps: spec.SchemaProps{ + Description: "Defaults to Second", + Type: []string{"string"}, + Format: "", + }, + }, + "requestsPerUnit": { + SchemaProps: spec.SchemaProps{ + Type: []string{"integer"}, + Format: "int32", + }, + }, + }, + }, + }, + } +} + func schema_pkg_apis_sensor_v1alpha1_Sensor(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -1360,17 +1813,20 @@ func schema_pkg_apis_sensor_v1alpha1_Sensor(ref common.ReferenceCallback) common }, "metadata": { SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), }, }, "spec": { SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.SensorSpec"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.SensorSpec"), }, }, "status": { SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.SensorStatus"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.SensorStatus"), }, }, }, @@ -1405,7 +1861,8 @@ func schema_pkg_apis_sensor_v1alpha1_SensorList(ref common.ReferenceCallback) co }, "metadata": { SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), }, }, "items": { @@ -1414,7 +1871,8 @@ func schema_pkg_apis_sensor_v1alpha1_SensorList(ref common.ReferenceCallback) co Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.Sensor"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.Sensor"), }, }, }, @@ -1443,7 +1901,8 @@ func schema_pkg_apis_sensor_v1alpha1_SensorSpec(ref common.ReferenceCallback) co Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.EventDependency"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.EventDependency"), }, }, }, @@ -1456,7 +1915,8 @@ func schema_pkg_apis_sensor_v1alpha1_SensorSpec(ref common.ReferenceCallback) co Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.Trigger"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.Trigger"), }, }, }, @@ -1468,19 +1928,6 @@ func 
schema_pkg_apis_sensor_v1alpha1_SensorSpec(ref common.ReferenceCallback) co Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.Template"), }, }, - "dependencyGroups": { - SchemaProps: spec.SchemaProps{ - Description: "DependencyGroups is a list of the groups of events.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.DependencyGroup"), - }, - }, - }, - }, - }, "errorOnFailedRound": { SchemaProps: spec.SchemaProps{ Description: "ErrorOnFailedRound if set to true, marks sensor state as `error` if the previous trigger round fails. Once sensor state is set to `error`, no further triggers will be processed.", @@ -1495,26 +1942,42 @@ func schema_pkg_apis_sensor_v1alpha1_SensorSpec(ref common.ReferenceCallback) co Format: "", }, }, - "circuit": { + "replicas": { SchemaProps: spec.SchemaProps{ - Description: "Circuit is a boolean expression of dependency groups Deprecated: will be removed in v1.5, use Switch in triggers instead.", - Type: []string{"string"}, - Format: "", + Description: "Replicas is the sensor deployment replicas", + Type: []string{"integer"}, + Format: "int32", }, }, - "replicas": { + "revisionHistoryLimit": { SchemaProps: spec.SchemaProps{ - Description: "Replicas is the sensor deployment replicas", + Description: "RevisionHistoryLimit specifies how many old deployment revisions to retain", Type: []string{"integer"}, Format: "int32", }, }, + "loggingFields": { + SchemaProps: spec.SchemaProps{ + Description: "LoggingFields add additional key-value pairs when logging happens", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, }, Required: []string{"dependencies", "triggers"}, }, }, Dependencies: []string{ - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.DependencyGroup", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.EventDependency", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.Template", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.Trigger"}, + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.EventDependency", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.Template", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.Trigger"}, } } @@ -1538,7 +2001,8 @@ func schema_pkg_apis_sensor_v1alpha1_SensorStatus(ref common.ReferenceCallback) Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/common.Condition"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/common.Condition"), }, }, }, @@ -1552,6 +2016,58 @@ func schema_pkg_apis_sensor_v1alpha1_SensorStatus(ref common.ReferenceCallback) } } +func schema_pkg_apis_sensor_v1alpha1_SlackSender(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "username": { + SchemaProps: spec.SchemaProps{ + Description: "Username is the Slack application's username", + Type: []string{"string"}, + Format: "", + }, + }, + "icon": { + SchemaProps: spec.SchemaProps{ + Description: "Icon is the Slack application's icon, e.g. 
:robot_face: or https://example.com/image.png", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_pkg_apis_sensor_v1alpha1_SlackThread(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "messageAggregationKey": { + SchemaProps: spec.SchemaProps{ + Description: "MessageAggregationKey allows aggregating the messages to a thread by some key.", + Type: []string{"string"}, + Format: "", + }, + }, + "broadcastMessageToChannel": { + SchemaProps: spec.SchemaProps{ + Description: "BroadcastMessageToChannel allows broadcasting the message from the thread to the channel as well", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + }, + }, + } +} + func schema_pkg_apis_sensor_v1alpha1_SlackTrigger(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -1566,7 +2082,8 @@ func schema_pkg_apis_sensor_v1alpha1_SlackTrigger(ref common.ReferenceCallback) Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), }, }, }, @@ -1580,7 +2097,7 @@ func schema_pkg_apis_sensor_v1alpha1_SlackTrigger(ref common.ReferenceCallback) }, "channel": { SchemaProps: spec.SchemaProps{ - Description: "Channel refers to which Slack channel to send slack message.", + Description: "Channel refers to which Slack channel to send Slack message.", Type: []string{"string"}, Format: "", }, @@ -1592,11 +2109,39 @@ func schema_pkg_apis_sensor_v1alpha1_SlackTrigger(ref common.ReferenceCallback) Format: "", }, }, + "attachments": { + SchemaProps: spec.SchemaProps{ + Description: "Attachments is a JSON format string that represents an array of Slack attachments according to the attachments API: https://api.slack.com/reference/messaging/attachments .", + Type: []string{"string"}, + Format: "", + }, + }, + "blocks": { + SchemaProps: spec.SchemaProps{ + Description: "Blocks is a JSON format string that represents an array of Slack blocks according to the blocks API: https://api.slack.com/reference/block-kit/blocks .", + Type: []string{"string"}, + Format: "", + }, + }, + "thread": { + SchemaProps: spec.SchemaProps{ + Description: "Thread refers to additional options for sending messages to a Slack thread.", + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.SlackThread"), + }, + }, + "sender": { + SchemaProps: spec.SchemaProps{ + Description: "Sender refers to additional configuration of the Slack application that sends the message.", + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.SlackSender"), + }, + }, }, }, }, Dependencies: []string{ - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter", "k8s.io/api/core/v1.SecretKeySelector"}, + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.SlackSender", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.SlackThread", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter", "k8s.io/api/core/v1.SecretKeySelector"}, } } @@ -1607,27 +2152,9 @@ func schema_pkg_apis_sensor_v1alpha1_StandardK8STrigger(ref common.ReferenceCall Description: 
"StandardK8STrigger is the standard Kubernetes resource trigger", Type: []string{"object"}, Properties: map[string]spec.Schema{ - "group": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "version": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - "resource": { - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, "source": { SchemaProps: spec.SchemaProps{ - Description: "Source of the K8 resource file(s)", + Description: "Source of the K8s resource file(s)", Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.ArtifactLocation"), }, }, @@ -1645,7 +2172,8 @@ func schema_pkg_apis_sensor_v1alpha1_StandardK8STrigger(ref common.ReferenceCall Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), }, }, }, @@ -1666,7 +2194,6 @@ func schema_pkg_apis_sensor_v1alpha1_StandardK8STrigger(ref common.ReferenceCall }, }, }, - Required: []string{"group", "version", "resource"}, }, }, Dependencies: []string{ @@ -1687,8 +2214,9 @@ func schema_pkg_apis_sensor_v1alpha1_StatusPolicy(ref common.ReferenceCallback) Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"integer"}, - Format: "int32", + Default: 0, + Type: []string{"integer"}, + Format: "int32", }, }, }, @@ -1740,7 +2268,8 @@ func schema_pkg_apis_sensor_v1alpha1_Template(ref common.ReferenceCallback) comm Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.Volume"), + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.Volume"), }, }, }, @@ -1760,8 +2289,9 @@ func schema_pkg_apis_sensor_v1alpha1_Template(ref common.ReferenceCallback) comm Allows: true, Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", + Default: "", + Type: []string{"string"}, + Format: "", }, }, }, @@ -1774,7 +2304,8 @@ func schema_pkg_apis_sensor_v1alpha1_Template(ref common.ReferenceCallback) comm Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.Toleration"), + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.Toleration"), }, }, }, @@ -1793,7 +2324,8 @@ func schema_pkg_apis_sensor_v1alpha1_Template(ref common.ReferenceCallback) comm Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/core/v1.LocalObjectReference"), }, }, }, @@ -1837,6 +2369,7 @@ func schema_pkg_apis_sensor_v1alpha1_TimeFilter(ref common.ReferenceCallback) co "start": { SchemaProps: spec.SchemaProps{ Description: "Start is the beginning of a time window in UTC. Before this time, events for this dependency are ignored. Format is hh:mm:ss.", + Default: "", Type: []string{"string"}, Format: "", }, @@ -1844,6 +2377,7 @@ func schema_pkg_apis_sensor_v1alpha1_TimeFilter(ref common.ReferenceCallback) co "stop": { SchemaProps: spec.SchemaProps{ Description: "Stop is the end of a time window in UTC. After or equal to this time, events for this dependency are ignored and Format is hh:mm:ss. 
If it is smaller than Start, it is treated as next day of Start (e.g.: 22:00:00-01:00:00 means 22:00:00-25:00:00).", + Default: "", Type: []string{"string"}, Format: "", }, @@ -1875,7 +2409,8 @@ func schema_pkg_apis_sensor_v1alpha1_Trigger(ref common.ReferenceCallback) commo Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter"), }, }, }, @@ -1893,11 +2428,24 @@ func schema_pkg_apis_sensor_v1alpha1_Trigger(ref common.ReferenceCallback) commo Ref: ref("github.com/argoproj/argo-events/pkg/apis/common.Backoff"), }, }, + "rateLimit": { + SchemaProps: spec.SchemaProps{ + Description: "Rate limit, default unit is Second", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.RateLimit"), + }, + }, + "atLeastOnce": { + SchemaProps: spec.SchemaProps{ + Description: "AtLeastOnce determines the trigger execution semantics. Defaults to false. Trigger execution will use at-most-once semantics. If set to true, Trigger execution will switch to at-least-once semantics.", + Type: []string{"boolean"}, + Format: "", + }, + }, }, }, }, Dependencies: []string{ - "github.com/argoproj/argo-events/pkg/apis/common.Backoff", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerPolicy", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerTemplate"}, + "github.com/argoproj/argo-events/pkg/apis/common.Backoff", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.RateLimit", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerParameter", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerPolicy", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerTemplate"}, } } @@ -1917,6 +2465,7 @@ func schema_pkg_apis_sensor_v1alpha1_TriggerParameter(ref common.ReferenceCallba "dest": { SchemaProps: spec.SchemaProps{ Description: "Dest is the JSONPath of a resource key. A path is a series of keys separated by a dot. The colon character can be escaped with '.' The -1 key can be used to append a value to an existing array. See https://github.com/tidwall/sjson#path-syntax for more information about how this is used.", + Default: "", Type: []string{"string"}, Format: "", }, @@ -1947,6 +2496,7 @@ func schema_pkg_apis_sensor_v1alpha1_TriggerParameterSource(ref common.Reference "dependencyName": { SchemaProps: spec.SchemaProps{ Description: "DependencyName refers to the name of the dependency. The event which is stored for this dependency is used as payload for the parameterization. Make sure to refer to one of the dependencies you have defined under Dependencies list.", + Default: "", Type: []string{"string"}, Format: "", }, @@ -1986,6 +2536,13 @@ func schema_pkg_apis_sensor_v1alpha1_TriggerParameterSource(ref common.Reference Format: "", }, }, + "useRawData": { + SchemaProps: spec.SchemaProps{ + Description: "UseRawData indicates if the value in an event at data key should be used without converting to string. When true, a number, boolean, json or string parameter may be extracted. When the field is unspecified, or explicitly false, the behavior is to turn the extracted field into a string. (e.g. 
when set to true, the parameter 123 will resolve to the numerical type, but when false, or not provided, the string \"123\" will be resolved)", + Type: []string{"boolean"}, + Format: "", + }, + }, }, Required: []string{"dependencyName"}, }, @@ -2020,47 +2577,6 @@ func schema_pkg_apis_sensor_v1alpha1_TriggerPolicy(ref common.ReferenceCallback) } } -func schema_pkg_apis_sensor_v1alpha1_TriggerSwitch(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "TriggerSwitch describes condition which must be satisfied in order to execute a trigger. Depending upon condition type, status of dependency groups is used to evaluate the result. Deprecated: will be removed in v1.5", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "any": { - SchemaProps: spec.SchemaProps{ - Description: "Any acts as a OR operator between dependencies", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "all": { - SchemaProps: spec.SchemaProps{ - Description: "All acts as a AND operator between dependencies", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - }, - }, - } -} - func schema_pkg_apis_sensor_v1alpha1_TriggerTemplate(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -2071,6 +2587,7 @@ func schema_pkg_apis_sensor_v1alpha1_TriggerTemplate(ref common.ReferenceCallbac "name": { SchemaProps: spec.SchemaProps{ Description: "Name is a unique name of the action to take.", + Default: "", Type: []string{"string"}, Format: "", }, @@ -2142,24 +2659,50 @@ func schema_pkg_apis_sensor_v1alpha1_TriggerTemplate(ref common.ReferenceCallbac Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.LogTrigger"), }, }, - "switch": { - SchemaProps: spec.SchemaProps{ - Description: "DeprecatedSwitch is the condition to execute the trigger. 
Deprecated: will be removed in v1.5, use conditions instead", - Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerSwitch"), - }, - }, "azureEventHubs": { SchemaProps: spec.SchemaProps{ Description: "AzureEventHubs refers to the trigger that sends an event to an Azure Event Hub.", Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.AzureEventHubsTrigger"), }, }, + "pulsar": { + SchemaProps: spec.SchemaProps{ + Description: "Pulsar refers to the trigger designed to place messages on Pulsar topic.", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.PulsarTrigger"), + }, + }, + "conditionsReset": { + SchemaProps: spec.SchemaProps{ + Description: "Criteria to reset the conditions", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.ConditionsResetCriteria"), + }, + }, + }, + }, + }, + "azureServiceBus": { + SchemaProps: spec.SchemaProps{ + Description: "AzureServiceBus refers to the trigger designed to place messages on Azure Service Bus", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.AzureServiceBusTrigger"), + }, + }, + "email": { + SchemaProps: spec.SchemaProps{ + Description: "Email refers to the trigger designed to send an email notification", + Ref: ref("github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.EmailTrigger"), + }, + }, }, Required: []string{"name"}, }, }, Dependencies: []string{ - "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.AWSLambdaTrigger", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.ArgoWorkflowTrigger", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.AzureEventHubsTrigger", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.CustomTrigger", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.HTTPTrigger", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.KafkaTrigger", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.LogTrigger", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.NATSTrigger", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.OpenWhiskTrigger", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.SlackTrigger", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.StandardK8STrigger", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.TriggerSwitch"}, + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.AWSLambdaTrigger", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.ArgoWorkflowTrigger", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.AzureEventHubsTrigger", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.AzureServiceBusTrigger", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.ConditionsResetCriteria", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.CustomTrigger", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.EmailTrigger", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.HTTPTrigger", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.KafkaTrigger", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.LogTrigger", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.NATSTrigger", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.OpenWhiskTrigger", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.PulsarTrigger", "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.SlackTrigger", 
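// NOTE (illustrative annotation, not part of the generated schema): the new conditionsReset
// field above, together with the rateLimit and atLeastOnce fields added to Trigger in
// types.go below, surfaces in Sensor YAML roughly as follows; a hypothetical sketch, with
// all values made up for illustration:
//
//	triggers:
//	  - template:
//	      name: example-trigger          # hypothetical trigger name
//	      conditionsReset:
//	        - byTime:
//	            cron: "0 0 * * *"        # reset trigger conditions at midnight
//	            timezone: America/New_York
//	    rateLimit:
//	      unit: Second                   # Second | Minute | Hour; defaults to Second
//	      requestsPerUnit: 5
//	    atLeastOnce: true                # switch from at-most-once to at-least-once execution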
"github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1.StandardK8STrigger"}, } } @@ -2173,6 +2716,7 @@ func schema_pkg_apis_sensor_v1alpha1_URLArtifact(ref common.ReferenceCallback) c "path": { SchemaProps: spec.SchemaProps{ Description: "Path is the complete URL", + Default: "", Type: []string{"string"}, Format: "", }, diff --git a/pkg/apis/sensor/v1alpha1/register.go b/pkg/apis/sensor/v1alpha1/register.go index 3c94bf43a2..35bf76936d 100644 --- a/pkg/apis/sensor/v1alpha1/register.go +++ b/pkg/apis/sensor/v1alpha1/register.go @@ -29,7 +29,7 @@ var ( SchemeGroupVersion = schema.GroupVersion{Group: sensor.Group, Version: "v1alpha1"} // SchemaGroupVersionKind is a group version kind used to attach owner references - SchemaGroupVersionKind = schema.GroupVersionKind{Group: sensor.Group, Version: "v1alpha1", Kind: sensor.Kind} + SchemaGroupVersionKind = SchemeGroupVersion.WithKind(sensor.Kind) // SchemeBuilder is used to add go types to the GroupVersionKind scheme SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) diff --git a/pkg/apis/sensor/v1alpha1/types.go b/pkg/apis/sensor/v1alpha1/types.go index f9f8970b67..fdfd9016b9 100644 --- a/pkg/apis/sensor/v1alpha1/types.go +++ b/pkg/apis/sensor/v1alpha1/types.go @@ -34,7 +34,6 @@ type KubernetesResourceOperation string // possible values for KubernetesResourceOperation const ( - // deprecate create. Create KubernetesResourceOperation = "create" // create the resource Update KubernetesResourceOperation = "update" // updates the resource Patch KubernetesResourceOperation = "patch" // patch resource @@ -46,12 +45,14 @@ type ArgoWorkflowOperation string // possible values for ArgoWorkflowOperation const ( - Submit ArgoWorkflowOperation = "submit" // submit a workflow - Suspend ArgoWorkflowOperation = "suspend" // suspends a workflow - Resubmit ArgoWorkflowOperation = "resubmit" // resubmit a workflow - Retry ArgoWorkflowOperation = "retry" // retry a workflow - Resume ArgoWorkflowOperation = "resume" // resume a workflow - Terminate ArgoWorkflowOperation = "terminate" // terminate a workflow + Submit ArgoWorkflowOperation = "submit" // submit a workflow + SubmitFrom ArgoWorkflowOperation = "submit-from" // submit from existing resource + Suspend ArgoWorkflowOperation = "suspend" // suspends a workflow + Resubmit ArgoWorkflowOperation = "resubmit" // resubmit a workflow + Retry ArgoWorkflowOperation = "retry" // retry a workflow + Resume ArgoWorkflowOperation = "resume" // resume a workflow + Terminate ArgoWorkflowOperation = "terminate" // terminate a workflow + Stop ArgoWorkflowOperation = "stop" // stop a workflow ) // Comparator refers to the comparator operator for a data filter @@ -99,18 +100,19 @@ type SensorSpec struct { // Template is the pod specification for the sensor // +optional Template *Template `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` - // DependencyGroups is a list of the groups of events. - DependencyGroups []DependencyGroup `json:"dependencyGroups,omitempty" protobuf:"bytes,4,rep,name=dependencyGroups"` // ErrorOnFailedRound if set to true, marks sensor state as `error` if the previous trigger round fails. // Once sensor state is set to `error`, no further triggers will be processed. - ErrorOnFailedRound bool `json:"errorOnFailedRound,omitempty" protobuf:"varint,5,opt,name=errorOnFailedRound"` + ErrorOnFailedRound bool `json:"errorOnFailedRound,omitempty" protobuf:"varint,4,opt,name=errorOnFailedRound"` // EventBusName references to a EventBus name. 
By default the value is "default" - EventBusName string `json:"eventBusName,omitempty" protobuf:"bytes,6,opt,name=eventBusName"` - // Circuit is a boolean expression of dependency groups - // Deprecated: will be removed in v1.5, use Switch in triggers instead. - DeprecatedCircuit string `json:"circuit,omitempty" protobuf:"bytes,7,opt,name=circuit"` + EventBusName string `json:"eventBusName,omitempty" protobuf:"bytes,5,opt,name=eventBusName"` // Replicas is the sensor deployment replicas - Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,8,opt,name=replicas"` + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,6,opt,name=replicas"` + // RevisionHistoryLimit specifies how many old deployment revisions to retain + // +optional + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,7,opt,name=revisionHistoryLimit"` + // LoggingFields add additional key-value pairs when logging happens + // +optional + LoggingFields map[string]string `json:"loggingFields" protobuf:"bytes,8,rep,name=loggingFields"` } func (s SensorSpec) GetReplicas() int32 { @@ -182,6 +184,14 @@ type Template struct { Affinity *corev1.Affinity `json:"affinity,omitempty" protobuf:"bytes,11,opt,name=affinity"` } +type LogicalOperator string + +const ( + AndLogicalOperator LogicalOperator = "and" // Equal to && + OrLogicalOperator LogicalOperator = "or" // Equal to || + EmptyLogicalOperator LogicalOperator = "" // Empty will default to AND (&&) +) + // EventDependency describes a dependency type EventDependency struct { // Name is a unique name of this dependency @@ -192,14 +202,22 @@ type EventDependency struct { EventName string `json:"eventName" protobuf:"bytes,3,name=eventName"` // Filters and rules governing toleration of success and constraints on the context and data of an event Filters *EventDependencyFilter `json:"filters,omitempty" protobuf:"bytes,4,opt,name=filters"` + // Transform transforms the event data + Transform *EventDependencyTransformer `json:"transform,omitempty" protobuf:"bytes,5,opt,name=transform"` + // FiltersLogicalOperator defines how different filters are evaluated together. + // Available values: and (&&), or (||) + // Is optional and if left blank treated as and (&&). + FiltersLogicalOperator LogicalOperator `json:"filtersLogicalOperator,omitempty" protobuf:"bytes,6,opt,name=filtersLogicalOperator,casttype=LogicalOperator"` } -// DependencyGroup is the group of dependencies -type DependencyGroup struct { - // Name of the group - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Dependencies of events - Dependencies []string `json:"dependencies" protobuf:"bytes,2,rep,name=dependencies"` +// EventDependencyTransformer transforms the event +type EventDependencyTransformer struct { + // JQ holds the jq command applied for transformation + // +optional + JQ string `json:"jq,omitempty" protobuf:"bytes,1,opt,name=jq"` + // Script refers to a Lua script used to transform the event + // +optional + Script string `json:"script,omitempty" protobuf:"bytes,2,opt,name=script"` } // EventDependencyFilter defines filters and constraints for an event. @@ -212,6 +230,16 @@ type EventDependencyFilter struct { Data []DataFilter `json:"data,omitempty" protobuf:"bytes,3,rep,name=data"` // Exprs contains the list of expressions evaluated against the event payload. Exprs []ExprFilter `json:"exprs,omitempty" protobuf:"bytes,4,rep,name=exprs"` + // DataLogicalOperator defines how multiple Data filters (if defined) are evaluated together. 
+ // Available values: and (&&), or (||) + // Is optional and if left blank treated as and (&&). + DataLogicalOperator LogicalOperator `json:"dataLogicalOperator,omitempty" protobuf:"bytes,5,opt,name=dataLogicalOperator,casttype=DataLogicalOperator"` + // ExprLogicalOperator defines how multiple Exprs filters (if defined) are evaluated together. + // Available values: and (&&), or (||) + // Is optional and if left blank treated as and (&&). + ExprLogicalOperator LogicalOperator `json:"exprLogicalOperator,omitempty" protobuf:"bytes,6,opt,name=exprLogicalOperator,casttype=ExprLogicalOperator"` + // Script refers to a Lua script evaluated to determine the validity of an event. + Script string `json:"script,omitempty" protobuf:"bytes,7,opt,name=script"` } type ExprFilter struct { @@ -299,6 +327,29 @@ type Trigger struct { // Retry strategy, defaults to no retry // +optional RetryStrategy *apicommon.Backoff `json:"retryStrategy,omitempty" protobuf:"bytes,4,opt,name=retryStrategy"` + // Rate limit, default unit is Second + // +optional + RateLimit *RateLimit `json:"rateLimit,omitempty" protobuf:"bytes,5,opt,name=rateLimit"` + // AtLeastOnce determines the trigger execution semantics. + // Defaults to false. Trigger execution will use at-most-once semantics. + // If set to true, Trigger execution will switch to at-least-once semantics. + // +kubebuilder:default=false + // +optional + AtLeastOnce bool `json:"atLeastOnce,omitempty" protobuf:"varint,6,opt,name=atLeastOnce"` +} + +type RateLimiteUnit string + +const ( + Second RateLimiteUnit = "Second" + Minute RateLimiteUnit = "Minute" + Hour RateLimiteUnit = "Hour" +) + +type RateLimit struct { + // Defaults to Second + Unit RateLimiteUnit `json:"unit,omitempty" protobuf:"bytes,1,opt,name=unit"` + RequestsPerUnit int32 `json:"requestsPerUnit,omitempty" protobuf:"bytes,2,opt,name=requestsPerUnit"` } // TriggerTemplate is the template that describes trigger specification. @@ -338,38 +389,46 @@ type TriggerTemplate struct { OpenWhisk *OpenWhiskTrigger `json:"openWhisk,omitempty" protobuf:"bytes,11,opt,name=openWhisk"` // Log refers to the trigger designed to log the event. // +optional - Log *LogTrigger `json:"log,omitempty" protobuf:"bytes,13,opt,name=log"` - // DeprecatedSwitch is the condition to execute the trigger. - // Deprecated: will be removed in v1.5, use conditions instead - // +optional - DeprecatedSwitch *TriggerSwitch `json:"switch,omitempty" protobuf:"bytes,12,opt,name=switch"` + Log *LogTrigger `json:"log,omitempty" protobuf:"bytes,12,opt,name=log"` // AzureEventHubs refers to the trigger that sends an event to an Azure Event Hub. // +optional - AzureEventHubs *AzureEventHubsTrigger `json:"azureEventHubs,omitempty" protobuf:"bytes,14,opt,name=azureEventHubs"` + AzureEventHubs *AzureEventHubsTrigger `json:"azureEventHubs,omitempty" protobuf:"bytes,13,opt,name=azureEventHubs"` + // Pulsar refers to the trigger designed to place messages on Pulsar topic. 
+ // +optional
+ Pulsar *PulsarTrigger `json:"pulsar,omitempty" protobuf:"bytes,14,opt,name=pulsar"`
+ // Criteria to reset the conditions
+ // +optional
+ ConditionsReset []ConditionsResetCriteria `json:"conditionsReset,omitempty" protobuf:"bytes,15,rep,name=conditionsReset"`
+ // AzureServiceBus refers to the trigger designed to place messages on Azure Service Bus
+ // +optional
+ AzureServiceBus *AzureServiceBusTrigger `json:"azureServiceBus,omitempty" protobuf:"bytes,16,opt,name=azureServiceBus"`
+ // Email refers to the trigger designed to send an email notification
+ // +optional
+ Email *EmailTrigger `json:"email,omitempty" protobuf:"bytes,17,opt,name=email"`
+}
+
+type ConditionsResetCriteria struct {
+ // ByTime resets the conditions on a cron-like schedule. For reference, see: https://en.wikipedia.org/wiki/Cron
+ ByTime *ConditionsResetByTime `json:"byTime,omitempty" protobuf:"bytes,1,opt,name=byTime"`
}

-// TriggerSwitch describes condition which must be satisfied in order to execute a trigger.
-// Depending upon condition type, status of dependency groups is used to evaluate the result.
-// Deprecated: will be removed in v1.5
-type TriggerSwitch struct {
- // Any acts as a OR operator between dependencies
- Any []string `json:"any,omitempty" protobuf:"bytes,1,rep,name=any"`
- // All acts as a AND operator between dependencies
- All []string `json:"all,omitempty" protobuf:"bytes,2,rep,name=all"`
+type ConditionsResetByTime struct {
+ // Cron is a cron-like expression. For reference, see: https://en.wikipedia.org/wiki/Cron
+ Cron string `json:"cron,omitempty" protobuf:"bytes,1,opt,name=cron"`
+ // +optional
+ Timezone string `json:"timezone,omitempty" protobuf:"bytes,2,opt,name=timezone"`
}

// StandardK8STrigger is the standard Kubernetes resource trigger
type StandardK8STrigger struct {
- // The unambiguous kind of this object - used in order to retrieve the appropriate kubernetes api client for this resource
- metav1.GroupVersionResource `json:",inline" protobuf:"bytes,1,opt,name=groupVersionResource"`
- // Source of the K8 resource file(s)
- Source *ArtifactLocation `json:"source,omitempty" protobuf:"bytes,2,opt,name=source"`
+ // Source of the K8s resource file(s)
+ Source *ArtifactLocation `json:"source,omitempty" protobuf:"bytes,1,opt,name=source"`
// Operation refers to the type of operation performed on the k8s resource.
// Default value is Create.
// +optional
- Operation KubernetesResourceOperation `json:"operation,omitempty" protobuf:"bytes,3,opt,name=operation,casttype=KubernetesResourceOperation"`
+ Operation KubernetesResourceOperation `json:"operation,omitempty" protobuf:"bytes,2,opt,name=operation,casttype=KubernetesResourceOperation"`
// Parameters is the list of parameters that are applied to the resolved K8s trigger object.
- Parameters []TriggerParameter `json:"parameters,omitempty" protobuf:"bytes,4,rep,name=parameters"`
+ Parameters []TriggerParameter `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"`
// PatchStrategy controls the K8s object patching strategy when the trigger operation is specified as patch.
// possible values:
// "application/json-patch+json"
@@ -378,7 +437,7 @@ type StandardK8STrigger struct {
// "application/apply-patch+yaml".
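// (Editorial aside: these media types correspond to the PatchType constants defined
// in k8s.io/apimachinery/pkg/types, which is what the k8stypes alias below refers to.)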
// Defaults to "application/merge-patch+json"
// +optional
- PatchStrategy k8stypes.PatchType `json:"patchStrategy,omitempty" protobuf:"bytes,5,opt,name=patchStrategy,casttype=k8s.io/apimachinery/pkg/types.PatchType"`
+ PatchStrategy k8stypes.PatchType `json:"patchStrategy,omitempty" protobuf:"bytes,4,opt,name=patchStrategy,casttype=k8s.io/apimachinery/pkg/types.PatchType"`
// LiveObject specifies whether the resource should be directly fetched from K8s instead
// of being marshaled from the resource artifact. If set to true, the resource artifact
// must contain the information required to uniquely identify the resource in the cluster,
@@ -386,12 +445,12 @@ type StandardK8STrigger struct {
// data.
// Only valid for operation type `update`
// +optional
- LiveObject bool `json:"liveObject,omitempty" protobuf:"varint,6,opt,name=liveObject"`
+ LiveObject bool `json:"liveObject,omitempty" protobuf:"varint,5,opt,name=liveObject"`
}

// ArgoWorkflowTrigger is the trigger for the Argo Workflow
type ArgoWorkflowTrigger struct {
- // Source of the K8 resource file(s)
+ // Source of the K8s resource file(s)
Source *ArtifactLocation `json:"source,omitempty" protobuf:"bytes,1,opt,name=source"`
// Operation refers to the type of operation performed on the argo workflow resource.
// Default value is Submit.
@@ -399,8 +458,8 @@ type ArgoWorkflowTrigger struct {
Operation ArgoWorkflowOperation `json:"operation,omitempty" protobuf:"bytes,2,opt,name=operation,casttype=ArgoWorkflowOperation"`
// Parameters is the list of parameters to pass to resolved Argo Workflow object
Parameters []TriggerParameter `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"`
- // The unambiguous kind of this object - used in order to retrieve the appropriate kubernetes api client for this resource
- metav1.GroupVersionResource `json:",inline" protobuf:"bytes,4,opt,name=groupVersionResource"`
+ // Args is the list of arguments to pass to the argo CLI
+ Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"`
}

// HTTPTrigger is the trigger for the HTTP request
@@ -440,9 +499,11 @@ type HTTPTrigger struct {
type AWSLambdaTrigger struct {
// FunctionName refers to the name of the function to invoke.
FunctionName string `json:"functionName" protobuf:"bytes,1,opt,name=functionName"`
- // AccessKey refers K8 secret containing aws access key
+ // AccessKey refers to the K8s secret containing the AWS access key
+ // +optional
AccessKey *corev1.SecretKeySelector `json:"accessKey,omitempty" protobuf:"bytes,2,opt,name=accessKey"`
- // SecretKey refers K8 secret containing aws secret key
+ // SecretKey refers to the K8s secret containing the AWS secret key
+ // +optional
SecretKey *corev1.SecretKeySelector `json:"secretKey,omitempty" protobuf:"bytes,3,opt,name=secretKey"`
// Region is AWS region
Region string `json:"region" protobuf:"bytes,4,opt,name=region"`
@@ -466,6 +527,9 @@ type AWSLambdaTrigger struct {
// has permission to invoke the function.
// +optional
InvocationType *string `json:"invocationType,omitempty" protobuf:"bytes,7,opt,name=invocationType"`
+ // RoleARN is the Amazon Resource Name (ARN) of the role to assume.
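+ // For example (a hypothetical value, not from this change):
+ //   roleARN: arn:aws:iam::123456789012:role/lambda-invoker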
+ // +optional
+ RoleARN string `json:"roleARN,omitempty" protobuf:"bytes,8,opt,name=roleARN"`
}

// AzureEventHubsTrigger refers to specification of the Azure Event Hubs Trigger
@@ -486,6 +550,26 @@ type AzureEventHubsTrigger struct {
Parameters []TriggerParameter `json:"parameters,omitempty" protobuf:"bytes,6,rep,name=parameters"`
}

+type AzureServiceBusTrigger struct {
+ // ConnectionString is the connection string for the Azure Service Bus
+ ConnectionString *corev1.SecretKeySelector `json:"connectionString,omitempty" protobuf:"bytes,1,opt,name=connectionString"`
+ // QueueName is the name of the Azure Service Bus Queue
+ QueueName string `json:"queueName" protobuf:"bytes,2,opt,name=queueName"`
+ // TopicName is the name of the Azure Service Bus Topic
+ TopicName string `json:"topicName" protobuf:"bytes,3,opt,name=topicName"`
+ // SubscriptionName is the name of the Azure Service Bus Topic Subscription
+ SubscriptionName string `json:"subscriptionName" protobuf:"bytes,4,opt,name=subscriptionName"`
+ // TLS configuration for the service bus client
+ // +optional
+ TLS *apicommon.TLSConfig `json:"tls,omitempty" protobuf:"bytes,5,opt,name=tls"`
+ // Payload is the list of key-value extracted from an event payload to construct the request payload.
+ Payload []TriggerParameter `json:"payload" protobuf:"bytes,6,rep,name=payload"`
+ // Parameters is the list of key-value extracted from event's payload that are applied to
+ // the trigger resource.
+ // +optional
+ Parameters []TriggerParameter `json:"parameters,omitempty" protobuf:"bytes,7,rep,name=parameters"`
+}
+
// KafkaTrigger refers to the specification of the Kafka trigger.
type KafkaTrigger struct {
// URL of the Kafka broker, multiple URLs separated by comma.
@@ -493,7 +577,8 @@ type KafkaTrigger struct {
// Name of the topic.
// More info at https://kafka.apache.org/documentation/#intro_topics
Topic string `json:"topic" protobuf:"bytes,2,opt,name=topic"`
- // Partition to write data to.
+ // +optional
+ // DEPRECATED
Partition int32 `json:"partition" protobuf:"varint,3,opt,name=partition"`
// Parameters is the list of parameters that are applied to the resolved Kafka trigger object.
Parameters []TriggerParameter `json:"parameters,omitempty" protobuf:"bytes,4,rep,name=parameters"`
@@ -516,15 +601,59 @@ type KafkaTrigger struct {
// Payload is the list of key-value extracted from an event payload to construct the request payload.
Payload []TriggerParameter `json:"payload" protobuf:"bytes,9,rep,name=payload"`
// The partitioning key for the messages put on the Kafka topic.
- // Defaults to broker url.
// +optional.
- PartitioningKey string `json:"partitioningKey,omitempty" protobuf:"bytes,10,opt,name=partitioningKey"`
+ PartitioningKey *string `json:"partitioningKey,omitempty" protobuf:"bytes,10,opt,name=partitioningKey"`
// Specify what kafka version is being connected to; this enables certain features in sarama. Defaults to 1.0.0.
// +optional
Version string `json:"version,omitempty" protobuf:"bytes,11,opt,name=version"`
// SASL configuration for the kafka client
// +optional
SASL *apicommon.SASLConfig `json:"sasl,omitempty" protobuf:"bytes,12,opt,name=sasl"`
+ // Schema Registry configuration to produce messages in avro format
+ // +optional
+ SchemaRegistry *apicommon.SchemaRegistryConfig `json:"schemaRegistry,omitempty" protobuf:"bytes,13,opt,name=schemaRegistry"`
+}
+
+// PulsarTrigger refers to the specification of the Pulsar trigger.
+type PulsarTrigger struct {
+ // Configure the service URL for the Pulsar service.
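+ // Typically of the form pulsar://<host>:6650, or pulsar+ssl://<host>:6651 when TLS
+ // is enabled (an illustrative note; hosts and ports are deployment-specific).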
+ // +required
+ URL string `json:"url" protobuf:"bytes,1,name=url"`
+ // Name of the topic.
+ // See https://pulsar.apache.org/docs/en/concepts-messaging/
+ Topic string `json:"topic" protobuf:"bytes,2,name=topic"`
+ // Parameters is the list of parameters that are applied to the resolved Pulsar trigger object.
+ Parameters []TriggerParameter `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"`
+ // Payload is the list of key-value extracted from an event payload to construct the request payload.
+ Payload []TriggerParameter `json:"payload" protobuf:"bytes,4,rep,name=payload"`
+ // Trusted TLS certificate secret.
+ // +optional
+ TLSTrustCertsSecret *corev1.SecretKeySelector `json:"tlsTrustCertsSecret,omitempty" protobuf:"bytes,5,opt,name=tlsTrustCertsSecret"`
+ // Whether the Pulsar client accepts untrusted TLS certificates from the broker.
+ // +optional
+ TLSAllowInsecureConnection bool `json:"tlsAllowInsecureConnection,omitempty" protobuf:"bytes,6,opt,name=tlsAllowInsecureConnection"`
+ // Whether the Pulsar client verifies the validity of the host name from the broker.
+ // +optional
+ TLSValidateHostname bool `json:"tlsValidateHostname,omitempty" protobuf:"bytes,7,opt,name=tlsValidateHostname"`
+ // TLS configuration for the pulsar client.
+ // +optional
+ TLS *apicommon.TLSConfig `json:"tls,omitempty" protobuf:"bytes,8,opt,name=tls"`
+ // Authentication token for the pulsar client.
+ // Either token or athenz can be set to use auth.
+ // +optional
+ AuthTokenSecret *corev1.SecretKeySelector `json:"authTokenSecret,omitempty" protobuf:"bytes,9,opt,name=authTokenSecret"`
+ // Backoff holds parameters applied to connection.
+ // +optional
+ ConnectionBackoff *apicommon.Backoff `json:"connectionBackoff,omitempty" protobuf:"bytes,10,opt,name=connectionBackoff"`
+ // Authentication athenz parameters for the pulsar client.
+ // Refer https://github.com/apache/pulsar-client-go/blob/master/pulsar/auth/athenz.go
+ // Either token or athenz can be set to use auth.
+ // +optional
+ AuthAthenzParams map[string]string `json:"authAthenzParams,omitempty" protobuf:"bytes,11,rep,name=authAthenzParams"`
+ // Authentication athenz privateKey secret for the pulsar client.
+ // AuthAthenzSecret must be set if AuthAthenzParams is used.
+ // +optional
+ AuthAthenzSecret *corev1.SecretKeySelector `json:"authAthenzSecret,omitempty" protobuf:"bytes,12,opt,name=authAthenzSecret"`
}

// NATSTrigger refers to the specification of the NATS trigger.
@@ -560,9 +689,38 @@ type CustomTrigger struct {
Parameters []TriggerParameter `json:"parameters,omitempty" protobuf:"bytes,6,rep,name=parameters"`
// Payload is the list of key-value extracted from an event payload to construct the request payload.
Payload []TriggerParameter `json:"payload" protobuf:"bytes,7,rep,name=payload"`
- // DeprecatedCertFilePath is path to the cert file within sensor for secure connection between sensor and custom trigger gRPC server.
- // Deprecated: will be removed in v1.5, use CertSecret instead
- DeprecatedCertFilePath string `json:"certFilePath,omitempty" protobuf:"bytes,8,opt,name=certFilePath"`
+}
+
+// EmailTrigger refers to the specification of the email notification trigger.
+type EmailTrigger struct {
+ // Parameters is the list of key-value extracted from event's payload that are applied to
+ // the trigger resource.
+ // +optional
+ Parameters []TriggerParameter `json:"parameters,omitempty" protobuf:"bytes,1,rep,name=parameters"`
+ // Username refers to the username used to connect to the smtp server.
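+ // An editorial note: when Username and SMTPPassword are both provided, the client
+ // can authenticate to the server; when both are omitted, the connection is presumably
+ // unauthenticated.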
+ // +optional
+ Username string `json:"username,omitempty" protobuf:"bytes,2,opt,name=username"`
+ // SMTPPassword refers to the Kubernetes secret that holds the smtp password used to connect to the smtp server.
+ // +optional
+ SMTPPassword *corev1.SecretKeySelector `json:"smtpPassword,omitempty" protobuf:"bytes,3,opt,name=smtpPassword"`
+ // Host refers to the smtp host url to which the email is sent.
+ Host string `json:"host,omitempty" protobuf:"bytes,4,opt,name=host"`
+ // Port refers to the smtp server port to which the email is sent.
+ // Defaults to 0.
+ // +optional
+ Port int32 `json:"port,omitempty" protobuf:"varint,5,opt,name=port"`
+ // To refers to the email addresses to which the emails are sent.
+ // +optional
+ To []string `json:"to,omitempty" protobuf:"bytes,6,rep,name=to"`
+ // From refers to the address from which the email is sent.
+ // +optional
+ From string `json:"from,omitempty" protobuf:"bytes,7,opt,name=from"`
+ // Subject refers to the subject line of the email sent.
+ // +optional
+ Subject string `json:"subject,omitempty" protobuf:"bytes,8,opt,name=subject"`
+ // Body refers to the body/content of the email sent.
+ // +optional
+ Body string `json:"body,omitempty" protobuf:"bytes,9,opt,name=body"`
}

// SlackTrigger refers to the specification of the slack notification trigger.
@@ -573,12 +731,42 @@ type SlackTrigger struct {
Parameters []TriggerParameter `json:"parameters,omitempty" protobuf:"bytes,1,rep,name=parameters"`
// SlackToken refers to the Kubernetes secret that holds the slack token required to send messages.
SlackToken *corev1.SecretKeySelector `json:"slackToken,omitempty" protobuf:"bytes,2,opt,name=slackToken"`
- // Channel refers to which Slack channel to send slack message.
+ // Channel refers to which Slack channel to send Slack message.
// +optional
Channel string `json:"channel,omitempty" protobuf:"bytes,3,opt,name=channel"`
// Message refers to the message to send to the Slack channel.
// +optional
Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
+ // Attachments is a JSON format string that represents an array of Slack attachments according to the attachments API: https://api.slack.com/reference/messaging/attachments .
+ // +optional
+ Attachments string `json:"attachments,omitempty" protobuf:"bytes,5,opt,name=attachments"`
+ // Blocks is a JSON format string that represents an array of Slack blocks according to the blocks API: https://api.slack.com/reference/block-kit/blocks .
+ // +optional
+ Blocks string `json:"blocks,omitempty" protobuf:"bytes,6,opt,name=blocks"`
+ // Thread refers to additional options for sending messages to a Slack thread.
+ // +optional
+ Thread SlackThread `json:"thread,omitempty" protobuf:"bytes,7,opt,name=thread"`
+ // Sender refers to additional configuration of the Slack application that sends the message.
+ // +optional
+ Sender SlackSender `json:"sender,omitempty" protobuf:"bytes,8,opt,name=sender"`
+}
+
+type SlackSender struct {
+ // Username is the Slack application's username
+ // +optional
+ Username string `json:"username,omitempty" protobuf:"bytes,1,opt,name=username"`
+ // Icon is the Slack application's icon, e.g. :robot_face: or https://example.com/image.png
+ // +optional
+ Icon string `json:"icon,omitempty" protobuf:"bytes,2,opt,name=icon"`
+}
+
+type SlackThread struct {
+ // MessageAggregationKey allows aggregating the messages into a thread by some key.
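+ // For example (hypothetical), keying on a build or alert ID would collect every
+ // message about that build or alert into a single thread.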
+ // +optional
+ MessageAggregationKey string `json:"messageAggregationKey,omitempty" protobuf:"bytes,1,opt,name=messageAggregationKey"`
+ // BroadcastMessageToChannel allows broadcasting the message from the thread to the channel as well
+ // +optional
+ BroadcastMessageToChannel bool `json:"broadcastMessageToChannel,omitempty" protobuf:"bytes,2,opt,name=broadcastMessageToChannel"`
}

// OpenWhiskTrigger refers to the specification of the OpenWhisk trigger.
@@ -674,6 +862,12 @@ type TriggerParameterSource struct {
// This is only used if the DataKey is invalid.
// If the DataKey is invalid and this is not defined, this param source will produce an error.
Value *string `json:"value,omitempty" protobuf:"bytes,6,opt,name=value"`
+ // UseRawData indicates if the value in an event at data key should be used without converting to string.
+ // When true, a number, boolean, json or string parameter may be extracted. When the field is unspecified, or explicitly
+ // false, the behavior is to turn the extracted field into a string. (e.g. when set to true, the parameter
+ // 123 will resolve to the numerical type, but when false, or not provided, the string "123" will be resolved)
+ // +optional
+ UseRawData bool `json:"useRawData,omitempty" protobuf:"bytes,7,opt,name=useRawData"`
}

// TriggerPolicy dictates the policy for the trigger retries
@@ -822,11 +1016,9 @@ type GitArtifact struct {
// Refer https://git-scm.com/docs/git-remote
// +optional
Remote *GitRemoteConfig `json:"remote,omitempty" protobuf:"bytes,9,opt,name=remote"`
- // DeprecatedSSHKeyPath is path to your ssh key path. Use this if you don't want to provide username and password.
- // ssh key path must be mounted in sensor pod.
- // Deprecated: will be removed in v1.5, use SSHKeySecret instead.
+ // Whether to ignore the host key
// +optional
- DeprecatedSSHKeyPath string `json:"sshKeyPath,omitempty" protobuf:"bytes,10,opt,name=sshKeyPath"`
+ InsecureIgnoreHostKey bool `json:"insecureIgnoreHostKey,omitempty" protobuf:"bytes,10,opt,name=insecureIgnoreHostKey"`
}

// GitRemoteConfig contains the configuration of a Git remote
diff --git a/pkg/apis/sensor/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/sensor/v1alpha1/zz_generated.deepcopy.go
index bdb0563d55..2f76878edd 100644
--- a/pkg/apis/sensor/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/apis/sensor/v1alpha1/zz_generated.deepcopy.go
@@ -1,7 +1,8 @@
+//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*
-Copyright 2020 BlackRock, Inc.
+Copyright 2021 BlackRock, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -86,7 +87,11 @@ func (in *ArgoWorkflowTrigger) DeepCopyInto(out *ArgoWorkflowTrigger) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
- out.GroupVersionResource = in.GroupVersionResource
+ if in.Args != nil {
+ in, out := &in.Args, &out.Args
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
return
}

@@ -191,6 +196,83 @@ func (in *AzureEventHubsTrigger) DeepCopy() *AzureEventHubsTrigger {
return out
}

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AzureServiceBusTrigger) DeepCopyInto(out *AzureServiceBusTrigger) { + *out = *in + if in.ConnectionString != nil { + in, out := &in.ConnectionString, &out.ConnectionString + *out = new(v1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(common.TLSConfig) + (*in).DeepCopyInto(*out) + } + if in.Payload != nil { + in, out := &in.Payload, &out.Payload + *out = make([]TriggerParameter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]TriggerParameter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureServiceBusTrigger. +func (in *AzureServiceBusTrigger) DeepCopy() *AzureServiceBusTrigger { + if in == nil { + return nil + } + out := new(AzureServiceBusTrigger) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionsResetByTime) DeepCopyInto(out *ConditionsResetByTime) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsResetByTime. +func (in *ConditionsResetByTime) DeepCopy() *ConditionsResetByTime { + if in == nil { + return nil + } + out := new(ConditionsResetByTime) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionsResetCriteria) DeepCopyInto(out *ConditionsResetCriteria) { + *out = *in + if in.ByTime != nil { + in, out := &in.ByTime, &out.ByTime + *out = new(ConditionsResetByTime) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionsResetCriteria. +func (in *ConditionsResetCriteria) DeepCopy() *ConditionsResetCriteria { + if in == nil { + return nil + } + out := new(ConditionsResetCriteria) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CustomTrigger) DeepCopyInto(out *CustomTrigger) { *out = *in @@ -255,22 +337,34 @@ func (in *DataFilter) DeepCopy() *DataFilter { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DependencyGroup) DeepCopyInto(out *DependencyGroup) { +func (in *EmailTrigger) DeepCopyInto(out *EmailTrigger) { *out = *in - if in.Dependencies != nil { - in, out := &in.Dependencies, &out.Dependencies + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]TriggerParameter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SMTPPassword != nil { + in, out := &in.SMTPPassword, &out.SMTPPassword + *out = new(v1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.To != nil { + in, out := &in.To, &out.To *out = make([]string, len(*in)) copy(*out, *in) } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DependencyGroup. -func (in *DependencyGroup) DeepCopy() *DependencyGroup { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmailTrigger. 
+func (in *EmailTrigger) DeepCopy() *EmailTrigger { if in == nil { return nil } - out := new(DependencyGroup) + out := new(EmailTrigger) in.DeepCopyInto(out) return out } @@ -326,6 +420,11 @@ func (in *EventDependency) DeepCopyInto(out *EventDependency) { *out = new(EventDependencyFilter) (*in).DeepCopyInto(*out) } + if in.Transform != nil { + in, out := &in.Transform, &out.Transform + *out = new(EventDependencyTransformer) + **out = **in + } return } @@ -379,6 +478,22 @@ func (in *EventDependencyFilter) DeepCopy() *EventDependencyFilter { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventDependencyTransformer) DeepCopyInto(out *EventDependencyTransformer) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventDependencyTransformer. +func (in *EventDependencyTransformer) DeepCopy() *EventDependencyTransformer { + if in == nil { + return nil + } + out := new(EventDependencyTransformer) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ExprFilter) DeepCopyInto(out *ExprFilter) { *out = *in @@ -602,11 +717,21 @@ func (in *KafkaTrigger) DeepCopyInto(out *KafkaTrigger) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.PartitioningKey != nil { + in, out := &in.PartitioningKey, &out.PartitioningKey + *out = new(string) + **out = **in + } if in.SASL != nil { in, out := &in.SASL, &out.SASL *out = new(common.SASLConfig) (*in).DeepCopyInto(*out) } + if in.SchemaRegistry != nil { + in, out := &in.SchemaRegistry, &out.SchemaRegistry + *out = new(common.SchemaRegistryConfig) + (*in).DeepCopyInto(*out) + } return } @@ -722,6 +847,84 @@ func (in *PayloadField) DeepCopy() *PayloadField { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PulsarTrigger) DeepCopyInto(out *PulsarTrigger) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]TriggerParameter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Payload != nil { + in, out := &in.Payload, &out.Payload + *out = make([]TriggerParameter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TLSTrustCertsSecret != nil { + in, out := &in.TLSTrustCertsSecret, &out.TLSTrustCertsSecret + *out = new(v1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(common.TLSConfig) + (*in).DeepCopyInto(*out) + } + if in.AuthTokenSecret != nil { + in, out := &in.AuthTokenSecret, &out.AuthTokenSecret + *out = new(v1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + if in.ConnectionBackoff != nil { + in, out := &in.ConnectionBackoff, &out.ConnectionBackoff + *out = new(common.Backoff) + (*in).DeepCopyInto(*out) + } + if in.AuthAthenzParams != nil { + in, out := &in.AuthAthenzParams, &out.AuthAthenzParams + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AuthAthenzSecret != nil { + in, out := &in.AuthAthenzSecret, &out.AuthAthenzSecret + *out = new(v1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PulsarTrigger. 
+func (in *PulsarTrigger) DeepCopy() *PulsarTrigger { + if in == nil { + return nil + } + out := new(PulsarTrigger) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RateLimit) DeepCopyInto(out *RateLimit) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RateLimit. +func (in *RateLimit) DeepCopy() *RateLimit { + if in == nil { + return nil + } + out := new(RateLimit) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Sensor) DeepCopyInto(out *Sensor) { *out = *in @@ -805,18 +1008,23 @@ func (in *SensorSpec) DeepCopyInto(out *SensorSpec) { *out = new(Template) (*in).DeepCopyInto(*out) } - if in.DependencyGroups != nil { - in, out := &in.DependencyGroups, &out.DependencyGroups - *out = make([]DependencyGroup, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } if in.Replicas != nil { in, out := &in.Replicas, &out.Replicas *out = new(int32) **out = **in } + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + if in.LoggingFields != nil { + in, out := &in.LoggingFields, &out.LoggingFields + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } return } @@ -847,6 +1055,38 @@ func (in *SensorStatus) DeepCopy() *SensorStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SlackSender) DeepCopyInto(out *SlackSender) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SlackSender. +func (in *SlackSender) DeepCopy() *SlackSender { + if in == nil { + return nil + } + out := new(SlackSender) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SlackThread) DeepCopyInto(out *SlackThread) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SlackThread. +func (in *SlackThread) DeepCopy() *SlackThread { + if in == nil { + return nil + } + out := new(SlackThread) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SlackTrigger) DeepCopyInto(out *SlackTrigger) { *out = *in @@ -862,6 +1102,8 @@ func (in *SlackTrigger) DeepCopyInto(out *SlackTrigger) { *out = new(v1.SecretKeySelector) (*in).DeepCopyInto(*out) } + out.Thread = in.Thread + out.Sender = in.Sender return } @@ -878,7 +1120,6 @@ func (in *SlackTrigger) DeepCopy() *SlackTrigger { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *StandardK8STrigger) DeepCopyInto(out *StandardK8STrigger) { *out = *in - out.GroupVersionResource = in.GroupVersionResource if in.Source != nil { in, out := &in.Source, &out.Source *out = new(ArtifactLocation) @@ -1033,6 +1274,11 @@ func (in *Trigger) DeepCopyInto(out *Trigger) { *out = new(common.Backoff) (*in).DeepCopyInto(*out) } + if in.RateLimit != nil { + in, out := &in.RateLimit, &out.RateLimit + *out = new(RateLimit) + **out = **in + } return } @@ -1114,32 +1360,6 @@ func (in *TriggerPolicy) DeepCopy() *TriggerPolicy { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TriggerSwitch) DeepCopyInto(out *TriggerSwitch) { - *out = *in - if in.Any != nil { - in, out := &in.Any, &out.Any - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.All != nil { - in, out := &in.All, &out.All - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerSwitch. -func (in *TriggerSwitch) DeepCopy() *TriggerSwitch { - if in == nil { - return nil - } - out := new(TriggerSwitch) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TriggerTemplate) DeepCopyInto(out *TriggerTemplate) { *out = *in @@ -1193,16 +1413,33 @@ func (in *TriggerTemplate) DeepCopyInto(out *TriggerTemplate) { *out = new(LogTrigger) **out = **in } - if in.DeprecatedSwitch != nil { - in, out := &in.DeprecatedSwitch, &out.DeprecatedSwitch - *out = new(TriggerSwitch) - (*in).DeepCopyInto(*out) - } if in.AzureEventHubs != nil { in, out := &in.AzureEventHubs, &out.AzureEventHubs *out = new(AzureEventHubsTrigger) (*in).DeepCopyInto(*out) } + if in.Pulsar != nil { + in, out := &in.Pulsar, &out.Pulsar + *out = new(PulsarTrigger) + (*in).DeepCopyInto(*out) + } + if in.ConditionsReset != nil { + in, out := &in.ConditionsReset, &out.ConditionsReset + *out = make([]ConditionsResetCriteria, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AzureServiceBus != nil { + in, out := &in.AzureServiceBus, &out.AzureServiceBus + *out = new(AzureServiceBusTrigger) + (*in).DeepCopyInto(*out) + } + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = new(EmailTrigger) + (*in).DeepCopyInto(*out) + } return } diff --git a/pkg/client/eventbus/clientset/versioned/clientset.go b/pkg/client/eventbus/clientset/versioned/clientset.go index c8e47572fc..a3770b1662 100644 --- a/pkg/client/eventbus/clientset/versioned/clientset.go +++ b/pkg/client/eventbus/clientset/versioned/clientset.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -20,6 +20,7 @@ package versioned import ( "fmt" + "net/http" argoprojv1alpha1 "github.com/argoproj/argo-events/pkg/client/eventbus/clientset/versioned/typed/eventbus/v1alpha1" discovery "k8s.io/client-go/discovery" @@ -32,8 +33,7 @@ type Interface interface { ArgoprojV1alpha1() argoprojv1alpha1.ArgoprojV1alpha1Interface } -// Clientset contains the clients for groups. Each group has exactly one -// version included in a Clientset. +// Clientset contains the clients for groups. 
type Clientset struct { *discovery.DiscoveryClient argoprojV1alpha1 *argoprojv1alpha1.ArgoprojV1alpha1Client @@ -55,22 +55,45 @@ func (c *Clientset) Discovery() discovery.DiscoveryInterface { // NewForConfig creates a new Clientset for the given config. // If config's RateLimiter is not set and QPS and Burst are acceptable, // NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*Clientset, error) { configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. +func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { if configShallowCopy.Burst <= 0 { return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") } configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) } + var cs Clientset var err error - cs.argoprojV1alpha1, err = argoprojv1alpha1.NewForConfig(&configShallowCopy) + cs.argoprojV1alpha1, err = argoprojv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } @@ -80,11 +103,11 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { // NewForConfigOrDie creates a new Clientset for the given config and // panics if there is an error in the config. func NewForConfigOrDie(c *rest.Config) *Clientset { - var cs Clientset - cs.argoprojV1alpha1 = argoprojv1alpha1.NewForConfigOrDie(c) - - cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) - return &cs + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs } // New creates a new Clientset for the given RESTClient. diff --git a/pkg/client/eventbus/clientset/versioned/doc.go b/pkg/client/eventbus/clientset/versioned/doc.go deleted file mode 100644 index d4d9e0efaf..0000000000 --- a/pkg/client/eventbus/clientset/versioned/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2020 BlackRock, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated clientset. -package versioned diff --git a/pkg/client/eventbus/clientset/versioned/fake/clientset_generated.go b/pkg/client/eventbus/clientset/versioned/fake/clientset_generated.go index 537b3741fa..91b0f996b8 100644 --- a/pkg/client/eventbus/clientset/versioned/fake/clientset_generated.go +++ b/pkg/client/eventbus/clientset/versioned/fake/clientset_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -74,7 +74,10 @@ func (c *Clientset) Tracker() testing.ObjectTracker { return c.tracker } -var _ clientset.Interface = &Clientset{} +var ( + _ clientset.Interface = &Clientset{} + _ testing.FakeClient = &Clientset{} +) // ArgoprojV1alpha1 retrieves the ArgoprojV1alpha1Client func (c *Clientset) ArgoprojV1alpha1() argoprojv1alpha1.ArgoprojV1alpha1Interface { diff --git a/pkg/client/eventbus/clientset/versioned/fake/doc.go b/pkg/client/eventbus/clientset/versioned/fake/doc.go index 364034b345..e0c04494c2 100644 --- a/pkg/client/eventbus/clientset/versioned/fake/doc.go +++ b/pkg/client/eventbus/clientset/versioned/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/eventbus/clientset/versioned/fake/register.go b/pkg/client/eventbus/clientset/versioned/fake/register.go index 741fa545d2..8b1aafd448 100644 --- a/pkg/client/eventbus/clientset/versioned/fake/register.go +++ b/pkg/client/eventbus/clientset/versioned/fake/register.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -37,14 +37,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/pkg/client/eventbus/clientset/versioned/scheme/doc.go b/pkg/client/eventbus/clientset/versioned/scheme/doc.go index 766973c41f..682557074b 100644 --- a/pkg/client/eventbus/clientset/versioned/scheme/doc.go +++ b/pkg/client/eventbus/clientset/versioned/scheme/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/eventbus/clientset/versioned/scheme/register.go b/pkg/client/eventbus/clientset/versioned/scheme/register.go index 15a6faf551..92b133c3d3 100644 --- a/pkg/client/eventbus/clientset/versioned/scheme/register.go +++ b/pkg/client/eventbus/clientset/versioned/scheme/register.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -37,14 +37,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/pkg/client/eventbus/clientset/versioned/typed/eventbus/v1alpha1/doc.go b/pkg/client/eventbus/clientset/versioned/typed/eventbus/v1alpha1/doc.go index 46ca232c1e..e4d79ea8e9 100644 --- a/pkg/client/eventbus/clientset/versioned/typed/eventbus/v1alpha1/doc.go +++ b/pkg/client/eventbus/clientset/versioned/typed/eventbus/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/eventbus/clientset/versioned/typed/eventbus/v1alpha1/eventbus.go b/pkg/client/eventbus/clientset/versioned/typed/eventbus/v1alpha1/eventbus.go index 0bb7e57160..35b6c7b845 100644 --- a/pkg/client/eventbus/clientset/versioned/typed/eventbus/v1alpha1/eventbus.go +++ b/pkg/client/eventbus/clientset/versioned/typed/eventbus/v1alpha1/eventbus.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/eventbus/clientset/versioned/typed/eventbus/v1alpha1/eventbus_client.go b/pkg/client/eventbus/clientset/versioned/typed/eventbus/v1alpha1/eventbus_client.go index 36c8ba169c..89f8fb691c 100644 --- a/pkg/client/eventbus/clientset/versioned/typed/eventbus/v1alpha1/eventbus_client.go +++ b/pkg/client/eventbus/clientset/versioned/typed/eventbus/v1alpha1/eventbus_client.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,6 +19,8 @@ limitations under the License. 
package v1alpha1 import ( + "net/http" + v1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" "github.com/argoproj/argo-events/pkg/client/eventbus/clientset/versioned/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *ArgoprojV1alpha1Client) EventBus(namespace string) EventBusInterface { } // NewForConfig creates a new ArgoprojV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*ArgoprojV1alpha1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new ArgoprojV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ArgoprojV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/pkg/client/eventbus/clientset/versioned/typed/eventbus/v1alpha1/fake/doc.go b/pkg/client/eventbus/clientset/versioned/typed/eventbus/v1alpha1/fake/doc.go index 9a152b87de..fbac3b1655 100644 --- a/pkg/client/eventbus/clientset/versioned/typed/eventbus/v1alpha1/fake/doc.go +++ b/pkg/client/eventbus/clientset/versioned/typed/eventbus/v1alpha1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/eventbus/clientset/versioned/typed/eventbus/v1alpha1/fake/fake_eventbus.go b/pkg/client/eventbus/clientset/versioned/typed/eventbus/v1alpha1/fake/fake_eventbus.go index a8e2481123..51cbb1aa27 100644 --- a/pkg/client/eventbus/clientset/versioned/typed/eventbus/v1alpha1/fake/fake_eventbus.go +++ b/pkg/client/eventbus/clientset/versioned/typed/eventbus/v1alpha1/fake/fake_eventbus.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -24,7 +24,6 @@ import ( v1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -36,9 +35,9 @@ type FakeEventBus struct { ns string } -var eventbusResource = schema.GroupVersionResource{Group: "argoproj.io", Version: "v1alpha1", Resource: "eventbus"} +var eventbusResource = v1alpha1.SchemeGroupVersion.WithResource("eventbus") -var eventbusKind = schema.GroupVersionKind{Group: "argoproj.io", Version: "v1alpha1", Kind: "EventBus"} +var eventbusKind = v1alpha1.SchemeGroupVersion.WithKind("EventBus") // Get takes name of the eventBus, and returns the corresponding eventBus object, and an error if there is any. 
func (c *FakeEventBus) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.EventBus, err error) { @@ -117,7 +116,7 @@ func (c *FakeEventBus) UpdateStatus(ctx context.Context, eventBus *v1alpha1.Even // Delete takes name of the eventBus and deletes it. Returns an error if one occurs. func (c *FakeEventBus) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(eventbusResource, c.ns, name), &v1alpha1.EventBus{}) + Invokes(testing.NewDeleteActionWithOptions(eventbusResource, c.ns, name, opts), &v1alpha1.EventBus{}) return err } diff --git a/pkg/client/eventbus/clientset/versioned/typed/eventbus/v1alpha1/fake/fake_eventbus_client.go b/pkg/client/eventbus/clientset/versioned/typed/eventbus/v1alpha1/fake/fake_eventbus_client.go index 1c717379b7..7e54431970 100644 --- a/pkg/client/eventbus/clientset/versioned/typed/eventbus/v1alpha1/fake/fake_eventbus_client.go +++ b/pkg/client/eventbus/clientset/versioned/typed/eventbus/v1alpha1/fake/fake_eventbus_client.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/eventbus/clientset/versioned/typed/eventbus/v1alpha1/generated_expansion.go b/pkg/client/eventbus/clientset/versioned/typed/eventbus/v1alpha1/generated_expansion.go index e8ff4a463b..32a985403a 100644 --- a/pkg/client/eventbus/clientset/versioned/typed/eventbus/v1alpha1/generated_expansion.go +++ b/pkg/client/eventbus/clientset/versioned/typed/eventbus/v1alpha1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/eventbus/informers/externalversions/eventbus/interface.go b/pkg/client/eventbus/informers/externalversions/eventbus/interface.go index 660b7993c5..54fc21d877 100644 --- a/pkg/client/eventbus/informers/externalversions/eventbus/interface.go +++ b/pkg/client/eventbus/informers/externalversions/eventbus/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/eventbus/informers/externalversions/eventbus/v1alpha1/eventbus.go b/pkg/client/eventbus/informers/externalversions/eventbus/v1alpha1/eventbus.go index 3641f478fc..4fa2350204 100644 --- a/pkg/client/eventbus/informers/externalversions/eventbus/v1alpha1/eventbus.go +++ b/pkg/client/eventbus/informers/externalversions/eventbus/v1alpha1/eventbus.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/eventbus/informers/externalversions/eventbus/v1alpha1/interface.go b/pkg/client/eventbus/informers/externalversions/eventbus/v1alpha1/interface.go index 867344b645..586b687598 100644 --- a/pkg/client/eventbus/informers/externalversions/eventbus/v1alpha1/interface.go +++ b/pkg/client/eventbus/informers/externalversions/eventbus/v1alpha1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/eventbus/informers/externalversions/factory.go b/pkg/client/eventbus/informers/externalversions/factory.go index c54537392f..ebfd9ef2fd 100644 --- a/pkg/client/eventbus/informers/externalversions/factory.go +++ b/pkg/client/eventbus/informers/externalversions/factory.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -42,11 +42,17 @@ type sharedInformerFactory struct { lock sync.Mutex defaultResync time.Duration customResync map[reflect.Type]time.Duration + transform cache.TransformFunc informers map[reflect.Type]cache.SharedIndexInformer // startedInformers is used for tracking which informers have been started. // This allows Start() to be called multiple times safely. startedInformers map[reflect.Type]bool + // wg tracks how many goroutines were started. + wg sync.WaitGroup + // shuttingDown is true when Shutdown has been called. It may still be running + // because it needs to wait for goroutines. + shuttingDown bool } // WithCustomResyncConfig sets a custom resync period for the specified informer types. @@ -75,6 +81,14 @@ func WithNamespace(namespace string) SharedInformerOption { } } +// WithTransform sets a transform on all informers. +func WithTransform(transform cache.TransformFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.transform = transform + return factory + } +} + // NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { return NewSharedInformerFactoryWithOptions(client, defaultResync) @@ -107,20 +121,39 @@ func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResy return factory } -// Start initializes all requested informers. func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { f.lock.Lock() defer f.lock.Unlock() + if f.shuttingDown { + return + } + for informerType, informer := range f.informers { if !f.startedInformers[informerType] { - go informer.Run(stopCh) + f.wg.Add(1) + // We need a new variable in each loop iteration, + // otherwise the goroutine would use the loop variable + // and that keeps changing. + informer := informer + go func() { + defer f.wg.Done() + informer.Run(stopCh) + }() f.startedInformers[informerType] = true } } } -// WaitForCacheSync waits for all started informers' cache were synced. +func (f *sharedInformerFactory) Shutdown() { + f.lock.Lock() + f.shuttingDown = true + f.lock.Unlock() + + // Will return immediately if there is nothing to wait for. + f.wg.Wait() +} + func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { informers := func() map[reflect.Type]cache.SharedIndexInformer { f.lock.Lock() @@ -142,7 +175,7 @@ func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[ref return res } -// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// InformerFor returns the SharedIndexInformer for obj using an internal // client. 
func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
f.lock.Lock()
@@ -160,6 +193,7 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal
}

informer = newFunc(f.client, resyncPeriod)
+ informer.SetTransform(f.transform)
f.informers[informerType] = informer

return informer
@@ -167,11 +201,58 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal

// SharedInformerFactory provides shared informers for resources in all known
// API group versions.
+//
+// It is typically used like this:
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	defer cancel()
+//	factory := NewSharedInformerFactory(client, resyncPeriod)
+//	defer factory.Shutdown() // Returns immediately if nothing was started.
+//	genericInformer := factory.ForResource(resource)
+//	typedInformer := factory.SomeAPIGroup().V1().SomeType()
+//	factory.Start(ctx.Done()) // Start processing these informers.
+//	synced := factory.WaitForCacheSync(ctx.Done())
+//	for v, ok := range synced {
+//	    if !ok {
+//	        fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v)
+//	        return
+//	    }
+//	}
+//
+//	// Informers can also be created after Start, but then
+//	// Start must be called again:
+//	anotherGenericInformer := factory.ForResource(resource)
+//	factory.Start(ctx.Done())
type SharedInformerFactory interface {
internalinterfaces.SharedInformerFactory
- ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
+
+ // Start initializes all requested informers. They are handled in goroutines
+ // which run until the stop channel gets closed.
+ Start(stopCh <-chan struct{})
+
+ // Shutdown marks a factory as shutting down. At that point no new
+ // informers can be started anymore and Start will return without
+ // doing anything.
+ //
+ // In addition, Shutdown blocks until all goroutines have terminated. For that
+ // to happen, the close channel(s) that they were started with must be closed,
+ // either before Shutdown gets called or while it is waiting.
+ //
+ // Shutdown may be called multiple times, even concurrently. All such calls will
+ // block until all goroutines have terminated.
+ Shutdown()
+
+ // WaitForCacheSync blocks until all started informers' caches were synced
+ // or the stop channel gets closed.
WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool

+ // ForResource gives generic access to a shared informer of the matching type.
+ ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
+
+ // InformerFor returns the SharedIndexInformer for obj using an internal
+ // client.
+ InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer
+
Argoproj() eventbus.Interface
}
diff --git a/pkg/client/eventbus/informers/externalversions/generic.go b/pkg/client/eventbus/informers/externalversions/generic.go
index 84bd05cd5d..3b990c09be 100644
--- a/pkg/client/eventbus/informers/externalversions/generic.go
+++ b/pkg/client/eventbus/informers/externalversions/generic.go
@@ -1,5 +1,5 @@
/*
-Copyright 2020 BlackRock, Inc.
+Copyright 2021 BlackRock, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/pkg/client/eventbus/informers/externalversions/internalinterfaces/factory_interfaces.go b/pkg/client/eventbus/informers/externalversions/internalinterfaces/factory_interfaces.go index 73418cc67f..002582e777 100644 --- a/pkg/client/eventbus/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/pkg/client/eventbus/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/eventbus/listers/eventbus/v1alpha1/eventbus.go b/pkg/client/eventbus/listers/eventbus/v1alpha1/eventbus.go index f40c96b717..6b789ec6ff 100644 --- a/pkg/client/eventbus/listers/eventbus/v1alpha1/eventbus.go +++ b/pkg/client/eventbus/listers/eventbus/v1alpha1/eventbus.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/eventbus/listers/eventbus/v1alpha1/expansion_generated.go b/pkg/client/eventbus/listers/eventbus/v1alpha1/expansion_generated.go index 6cef60f460..d2e4c6bb39 100644 --- a/pkg/client/eventbus/listers/eventbus/v1alpha1/expansion_generated.go +++ b/pkg/client/eventbus/listers/eventbus/v1alpha1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/eventsource/clientset/versioned/clientset.go b/pkg/client/eventsource/clientset/versioned/clientset.go index 4d8b602ee9..e07839d690 100644 --- a/pkg/client/eventsource/clientset/versioned/clientset.go +++ b/pkg/client/eventsource/clientset/versioned/clientset.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -20,6 +20,7 @@ package versioned import ( "fmt" + "net/http" argoprojv1alpha1 "github.com/argoproj/argo-events/pkg/client/eventsource/clientset/versioned/typed/eventsource/v1alpha1" discovery "k8s.io/client-go/discovery" @@ -32,8 +33,7 @@ type Interface interface { ArgoprojV1alpha1() argoprojv1alpha1.ArgoprojV1alpha1Interface } -// Clientset contains the clients for groups. Each group has exactly one -// version included in a Clientset. +// Clientset contains the clients for groups. type Clientset struct { *discovery.DiscoveryClient argoprojV1alpha1 *argoprojv1alpha1.ArgoprojV1alpha1Client @@ -55,22 +55,45 @@ func (c *Clientset) Discovery() discovery.DiscoveryInterface { // NewForConfig creates a new Clientset for the given config. // If config's RateLimiter is not set and QPS and Burst are acceptable, // NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). 
func NewForConfig(c *rest.Config) (*Clientset, error) { configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. +func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { if configShallowCopy.Burst <= 0 { return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") } configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) } + var cs Clientset var err error - cs.argoprojV1alpha1, err = argoprojv1alpha1.NewForConfig(&configShallowCopy) + cs.argoprojV1alpha1, err = argoprojv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } @@ -80,11 +103,11 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { // NewForConfigOrDie creates a new Clientset for the given config and // panics if there is an error in the config. func NewForConfigOrDie(c *rest.Config) *Clientset { - var cs Clientset - cs.argoprojV1alpha1 = argoprojv1alpha1.NewForConfigOrDie(c) - - cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) - return &cs + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs } // New creates a new Clientset for the given RESTClient. diff --git a/pkg/client/eventsource/clientset/versioned/fake/clientset_generated.go b/pkg/client/eventsource/clientset/versioned/fake/clientset_generated.go index 87f1fe43d2..afe8570d60 100644 --- a/pkg/client/eventsource/clientset/versioned/fake/clientset_generated.go +++ b/pkg/client/eventsource/clientset/versioned/fake/clientset_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -74,7 +74,10 @@ func (c *Clientset) Tracker() testing.ObjectTracker { return c.tracker } -var _ clientset.Interface = &Clientset{} +var ( + _ clientset.Interface = &Clientset{} + _ testing.FakeClient = &Clientset{} +) // ArgoprojV1alpha1 retrieves the ArgoprojV1alpha1Client func (c *Clientset) ArgoprojV1alpha1() argoprojv1alpha1.ArgoprojV1alpha1Interface { diff --git a/pkg/client/eventsource/clientset/versioned/fake/doc.go b/pkg/client/eventsource/clientset/versioned/fake/doc.go index 364034b345..e0c04494c2 100644 --- a/pkg/client/eventsource/clientset/versioned/fake/doc.go +++ b/pkg/client/eventsource/clientset/versioned/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/eventsource/clientset/versioned/fake/register.go b/pkg/client/eventsource/clientset/versioned/fake/register.go index 70bf34eff8..60f2fa8339 100644 --- a/pkg/client/eventsource/clientset/versioned/fake/register.go +++ b/pkg/client/eventsource/clientset/versioned/fake/register.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -37,14 +37,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/pkg/client/eventsource/clientset/versioned/scheme/doc.go b/pkg/client/eventsource/clientset/versioned/scheme/doc.go index 766973c41f..682557074b 100644 --- a/pkg/client/eventsource/clientset/versioned/scheme/doc.go +++ b/pkg/client/eventsource/clientset/versioned/scheme/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/eventsource/clientset/versioned/scheme/register.go b/pkg/client/eventsource/clientset/versioned/scheme/register.go index 4ee66ea518..4f6d22604f 100644 --- a/pkg/client/eventsource/clientset/versioned/scheme/register.go +++ b/pkg/client/eventsource/clientset/versioned/scheme/register.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -37,14 +37,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. 
This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/pkg/client/eventsource/clientset/versioned/typed/eventsource/v1alpha1/doc.go b/pkg/client/eventsource/clientset/versioned/typed/eventsource/v1alpha1/doc.go index 46ca232c1e..e4d79ea8e9 100644 --- a/pkg/client/eventsource/clientset/versioned/typed/eventsource/v1alpha1/doc.go +++ b/pkg/client/eventsource/clientset/versioned/typed/eventsource/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/eventsource/clientset/versioned/typed/eventsource/v1alpha1/eventsource.go b/pkg/client/eventsource/clientset/versioned/typed/eventsource/v1alpha1/eventsource.go index 43fa59df1c..6cdb34b348 100644 --- a/pkg/client/eventsource/clientset/versioned/typed/eventsource/v1alpha1/eventsource.go +++ b/pkg/client/eventsource/clientset/versioned/typed/eventsource/v1alpha1/eventsource.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/eventsource/clientset/versioned/typed/eventsource/v1alpha1/eventsource_client.go b/pkg/client/eventsource/clientset/versioned/typed/eventsource/v1alpha1/eventsource_client.go index f194261b7d..ffb510b8ab 100644 --- a/pkg/client/eventsource/clientset/versioned/typed/eventsource/v1alpha1/eventsource_client.go +++ b/pkg/client/eventsource/clientset/versioned/typed/eventsource/v1alpha1/eventsource_client.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "net/http" + v1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" "github.com/argoproj/argo-events/pkg/client/eventsource/clientset/versioned/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *ArgoprojV1alpha1Client) EventSources(namespace string) EventSourceInter } // NewForConfig creates a new ArgoprojV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). 
func NewForConfig(c *rest.Config) (*ArgoprojV1alpha1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new ArgoprojV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ArgoprojV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/pkg/client/eventsource/clientset/versioned/typed/eventsource/v1alpha1/fake/doc.go b/pkg/client/eventsource/clientset/versioned/typed/eventsource/v1alpha1/fake/doc.go index 9a152b87de..fbac3b1655 100644 --- a/pkg/client/eventsource/clientset/versioned/typed/eventsource/v1alpha1/fake/doc.go +++ b/pkg/client/eventsource/clientset/versioned/typed/eventsource/v1alpha1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/eventsource/clientset/versioned/typed/eventsource/v1alpha1/fake/fake_eventsource.go b/pkg/client/eventsource/clientset/versioned/typed/eventsource/v1alpha1/fake/fake_eventsource.go index dec3376eb1..a9bcc790af 100644 --- a/pkg/client/eventsource/clientset/versioned/typed/eventsource/v1alpha1/fake/fake_eventsource.go +++ b/pkg/client/eventsource/clientset/versioned/typed/eventsource/v1alpha1/fake/fake_eventsource.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -24,7 +24,6 @@ import ( v1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -36,9 +35,9 @@ type FakeEventSources struct { ns string } -var eventsourcesResource = schema.GroupVersionResource{Group: "argoproj.io", Version: "v1alpha1", Resource: "eventsources"} +var eventsourcesResource = v1alpha1.SchemeGroupVersion.WithResource("eventsources") -var eventsourcesKind = schema.GroupVersionKind{Group: "argoproj.io", Version: "v1alpha1", Kind: "EventSource"} +var eventsourcesKind = v1alpha1.SchemeGroupVersion.WithKind("EventSource") // Get takes name of the eventSource, and returns the corresponding eventSource object, and an error if there is any. func (c *FakeEventSources) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.EventSource, err error) { @@ -117,7 +116,7 @@ func (c *FakeEventSources) UpdateStatus(ctx context.Context, eventSource *v1alph // Delete takes name of the eventSource and deletes it. Returns an error if one occurs. func (c *FakeEventSources) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. 
- Invokes(testing.NewDeleteAction(eventsourcesResource, c.ns, name), &v1alpha1.EventSource{}) + Invokes(testing.NewDeleteActionWithOptions(eventsourcesResource, c.ns, name, opts), &v1alpha1.EventSource{}) return err } diff --git a/pkg/client/eventsource/clientset/versioned/typed/eventsource/v1alpha1/fake/fake_eventsource_client.go b/pkg/client/eventsource/clientset/versioned/typed/eventsource/v1alpha1/fake/fake_eventsource_client.go index dea31e43fb..fef393f35b 100644 --- a/pkg/client/eventsource/clientset/versioned/typed/eventsource/v1alpha1/fake/fake_eventsource_client.go +++ b/pkg/client/eventsource/clientset/versioned/typed/eventsource/v1alpha1/fake/fake_eventsource_client.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/eventsource/clientset/versioned/typed/eventsource/v1alpha1/generated_expansion.go b/pkg/client/eventsource/clientset/versioned/typed/eventsource/v1alpha1/generated_expansion.go index 149b5ddb28..2b920f1c69 100644 --- a/pkg/client/eventsource/clientset/versioned/typed/eventsource/v1alpha1/generated_expansion.go +++ b/pkg/client/eventsource/clientset/versioned/typed/eventsource/v1alpha1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/eventsource/informers/externalversions/eventsource/interface.go b/pkg/client/eventsource/informers/externalversions/eventsource/interface.go index ab21abc16a..95b3990c5c 100644 --- a/pkg/client/eventsource/informers/externalversions/eventsource/interface.go +++ b/pkg/client/eventsource/informers/externalversions/eventsource/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/eventsource/informers/externalversions/eventsource/v1alpha1/eventsource.go b/pkg/client/eventsource/informers/externalversions/eventsource/v1alpha1/eventsource.go index dbe1f5fb61..c0051759a4 100644 --- a/pkg/client/eventsource/informers/externalversions/eventsource/v1alpha1/eventsource.go +++ b/pkg/client/eventsource/informers/externalversions/eventsource/v1alpha1/eventsource.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/eventsource/informers/externalversions/eventsource/v1alpha1/interface.go b/pkg/client/eventsource/informers/externalversions/eventsource/v1alpha1/interface.go index c1c894db76..8ab84f1684 100644 --- a/pkg/client/eventsource/informers/externalversions/eventsource/v1alpha1/interface.go +++ b/pkg/client/eventsource/informers/externalversions/eventsource/v1alpha1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
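The clientset constructors above now resolve transport settings once via rest.HTTPClientFor and thread the resulting *http.Client through NewForConfigAndClient, so the typed group client and the discovery client share one transport. A minimal sketch of supplying that client yourself, assuming a kubeconfig on disk (the path is illustrative):

    package main

    import (
        "log"

        eventsourceversioned "github.com/argoproj/argo-events/pkg/client/eventsource/clientset/versioned"
        "k8s.io/client-go/rest"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        // Path is illustrative; any rest.Config works.
        cfg, err := clientcmd.BuildConfigFromFlags("", "/home/user/.kube/config")
        if err != nil {
            log.Fatal(err)
        }

        // Resolve TLS and transport settings into one *http.Client up front...
        httpClient, err := rest.HTTPClientFor(cfg)
        if err != nil {
            log.Fatal(err)
        }

        // ...and share it across clientsets. Note the provided client takes
        // precedence over the transport fields still set on cfg.
        client, err := eventsourceversioned.NewForConfigAndClient(cfg, httpClient)
        if err != nil {
            log.Fatal(err)
        }
        _ = client
    }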
diff --git a/pkg/client/eventsource/informers/externalversions/factory.go b/pkg/client/eventsource/informers/externalversions/factory.go index 24c6f3bd85..e6739cd47e 100644 --- a/pkg/client/eventsource/informers/externalversions/factory.go +++ b/pkg/client/eventsource/informers/externalversions/factory.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -42,11 +42,17 @@ type sharedInformerFactory struct { lock sync.Mutex defaultResync time.Duration customResync map[reflect.Type]time.Duration + transform cache.TransformFunc informers map[reflect.Type]cache.SharedIndexInformer // startedInformers is used for tracking which informers have been started. // This allows Start() to be called multiple times safely. startedInformers map[reflect.Type]bool + // wg tracks how many goroutines were started. + wg sync.WaitGroup + // shuttingDown is true when Shutdown has been called. It may still be running + // because it needs to wait for goroutines. + shuttingDown bool } // WithCustomResyncConfig sets a custom resync period for the specified informer types. @@ -75,6 +81,14 @@ func WithNamespace(namespace string) SharedInformerOption { } } +// WithTransform sets a transform on all informers. +func WithTransform(transform cache.TransformFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.transform = transform + return factory + } +} + // NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { return NewSharedInformerFactoryWithOptions(client, defaultResync) @@ -107,20 +121,39 @@ func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResy return factory } -// Start initializes all requested informers. func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { f.lock.Lock() defer f.lock.Unlock() + if f.shuttingDown { + return + } + for informerType, informer := range f.informers { if !f.startedInformers[informerType] { - go informer.Run(stopCh) + f.wg.Add(1) + // We need a new variable in each loop iteration, + // otherwise the goroutine would use the loop variable + // and that keeps changing. + informer := informer + go func() { + defer f.wg.Done() + informer.Run(stopCh) + }() f.startedInformers[informerType] = true } } } -// WaitForCacheSync waits for all started informers' cache were synced. +func (f *sharedInformerFactory) Shutdown() { + f.lock.Lock() + f.shuttingDown = true + f.lock.Unlock() + + // Will return immediately if there is nothing to wait for. + f.wg.Wait() +} + func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { informers := func() map[reflect.Type]cache.SharedIndexInformer { f.lock.Lock() @@ -142,7 +175,7 @@ func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[ref return res } -// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// InformerFor returns the SharedIndexInformer for obj using an internal // client. 
func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { f.lock.Lock() @@ -160,6 +193,7 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal } informer = newFunc(f.client, resyncPeriod) + informer.SetTransform(f.transform) f.informers[informerType] = informer return informer @@ -167,11 +201,58 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal // SharedInformerFactory provides shared informers for resources in all known // API group versions. +// +// It is typically used like this: +// +// ctx, cancel := context.Background() +// defer cancel() +// factory := NewSharedInformerFactory(client, resyncPeriod) +// defer factory.WaitForStop() // Returns immediately if nothing was started. +// genericInformer := factory.ForResource(resource) +// typedInformer := factory.SomeAPIGroup().V1().SomeType() +// factory.Start(ctx.Done()) // Start processing these informers. +// synced := factory.WaitForCacheSync(ctx.Done()) +// for v, ok := range synced { +// if !ok { +// fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v) +// return +// } +// } +// +// // Creating informers can also be created after Start, but then +// // Start must be called again: +// anotherGenericInformer := factory.ForResource(resource) +// factory.Start(ctx.Done()) type SharedInformerFactory interface { internalinterfaces.SharedInformerFactory - ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + + // Start initializes all requested informers. They are handled in goroutines + // which run until the stop channel gets closed. + Start(stopCh <-chan struct{}) + + // Shutdown marks a factory as shutting down. At that point no new + // informers can be started anymore and Start will return without + // doing anything. + // + // In addition, Shutdown blocks until all goroutines have terminated. For that + // to happen, the close channel(s) that they were started with must be closed, + // either before Shutdown gets called or while it is waiting. + // + // Shutdown may be called multiple times, even concurrently. All such calls will + // block until all goroutines have terminated. + Shutdown() + + // WaitForCacheSync blocks until all started informers' caches were synced + // or the stop channel gets closed. WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + // ForResource gives generic access to a shared informer of the matching type. + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + + // InformerFor returns the SharedIndexInformer for obj using an internal + // client. + InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer + Argoproj() eventsource.Interface } diff --git a/pkg/client/eventsource/informers/externalversions/generic.go b/pkg/client/eventsource/informers/externalversions/generic.go index bda1f51444..2d4a5a7c7a 100644 --- a/pkg/client/eventsource/informers/externalversions/generic.go +++ b/pkg/client/eventsource/informers/externalversions/generic.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
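WithTransform, added above, installs a cache.TransformFunc on every informer the factory creates, letting objects be rewritten before they enter the store. A sketch under the assumption that you want to drop managedFields to shrink cache memory, which is a common use of transforms rather than something this change prescribes; newTrimmedFactory is an illustrative name:

    package informerutil

    import (
        "time"

        eventsourceversioned "github.com/argoproj/argo-events/pkg/client/eventsource/clientset/versioned"
        "github.com/argoproj/argo-events/pkg/client/eventsource/informers/externalversions"
        "k8s.io/apimachinery/pkg/api/meta"
    )

    // newTrimmedFactory builds a factory whose informers strip managedFields
    // from every object before it is cached.
    func newTrimmedFactory(client eventsourceversioned.Interface) externalversions.SharedInformerFactory {
        return externalversions.NewSharedInformerFactoryWithOptions(
            client,
            10*time.Minute,
            externalversions.WithTransform(func(obj interface{}) (interface{}, error) {
                // Transforms run once per object, before storage; returning the
                // (possibly mutated) object is required.
                if acc, err := meta.Accessor(obj); err == nil {
                    acc.SetManagedFields(nil)
                }
                return obj, nil
            }),
        )
    }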
diff --git a/pkg/client/eventsource/informers/externalversions/internalinterfaces/factory_interfaces.go b/pkg/client/eventsource/informers/externalversions/internalinterfaces/factory_interfaces.go index aa6520b4bd..421757af00 100644 --- a/pkg/client/eventsource/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/pkg/client/eventsource/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/eventsource/listers/eventsource/v1alpha1/eventsource.go b/pkg/client/eventsource/listers/eventsource/v1alpha1/eventsource.go index 19f7855e12..c2c5ef8108 100644 --- a/pkg/client/eventsource/listers/eventsource/v1alpha1/eventsource.go +++ b/pkg/client/eventsource/listers/eventsource/v1alpha1/eventsource.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/eventsource/listers/eventsource/v1alpha1/expansion_generated.go b/pkg/client/eventsource/listers/eventsource/v1alpha1/expansion_generated.go index b10c055aa2..e5dc800832 100644 --- a/pkg/client/eventsource/listers/eventsource/v1alpha1/expansion_generated.go +++ b/pkg/client/eventsource/listers/eventsource/v1alpha1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/sensor/clientset/versioned/clientset.go b/pkg/client/sensor/clientset/versioned/clientset.go index 65374ec48c..0b221e3793 100644 --- a/pkg/client/sensor/clientset/versioned/clientset.go +++ b/pkg/client/sensor/clientset/versioned/clientset.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -20,6 +20,7 @@ package versioned import ( "fmt" + "net/http" argoprojv1alpha1 "github.com/argoproj/argo-events/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1" discovery "k8s.io/client-go/discovery" @@ -32,8 +33,7 @@ type Interface interface { ArgoprojV1alpha1() argoprojv1alpha1.ArgoprojV1alpha1Interface } -// Clientset contains the clients for groups. Each group has exactly one -// version included in a Clientset. +// Clientset contains the clients for groups. type Clientset struct { *discovery.DiscoveryClient argoprojV1alpha1 *argoprojv1alpha1.ArgoprojV1alpha1Client @@ -55,22 +55,45 @@ func (c *Clientset) Discovery() discovery.DiscoveryInterface { // NewForConfig creates a new Clientset for the given config. // If config's RateLimiter is not set and QPS and Burst are acceptable, // NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). 
func NewForConfig(c *rest.Config) (*Clientset, error) { configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. +func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { if configShallowCopy.Burst <= 0 { return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") } configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) } + var cs Clientset var err error - cs.argoprojV1alpha1, err = argoprojv1alpha1.NewForConfig(&configShallowCopy) + cs.argoprojV1alpha1, err = argoprojv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } - cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err } @@ -80,11 +103,11 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { // NewForConfigOrDie creates a new Clientset for the given config and // panics if there is an error in the config. func NewForConfigOrDie(c *rest.Config) *Clientset { - var cs Clientset - cs.argoprojV1alpha1 = argoprojv1alpha1.NewForConfigOrDie(c) - - cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) - return &cs + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs } // New creates a new Clientset for the given RESTClient. diff --git a/pkg/client/sensor/clientset/versioned/doc.go b/pkg/client/sensor/clientset/versioned/doc.go deleted file mode 100644 index d4d9e0efaf..0000000000 --- a/pkg/client/sensor/clientset/versioned/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2020 BlackRock, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated clientset. -package versioned diff --git a/pkg/client/sensor/clientset/versioned/fake/clientset_generated.go b/pkg/client/sensor/clientset/versioned/fake/clientset_generated.go index 42272873f2..fc986bcfd1 100644 --- a/pkg/client/sensor/clientset/versioned/fake/clientset_generated.go +++ b/pkg/client/sensor/clientset/versioned/fake/clientset_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. 
+Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -74,7 +74,10 @@ func (c *Clientset) Tracker() testing.ObjectTracker { return c.tracker } -var _ clientset.Interface = &Clientset{} +var ( + _ clientset.Interface = &Clientset{} + _ testing.FakeClient = &Clientset{} +) // ArgoprojV1alpha1 retrieves the ArgoprojV1alpha1Client func (c *Clientset) ArgoprojV1alpha1() argoprojv1alpha1.ArgoprojV1alpha1Interface { diff --git a/pkg/client/sensor/clientset/versioned/fake/doc.go b/pkg/client/sensor/clientset/versioned/fake/doc.go index 364034b345..e0c04494c2 100644 --- a/pkg/client/sensor/clientset/versioned/fake/doc.go +++ b/pkg/client/sensor/clientset/versioned/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/sensor/clientset/versioned/fake/register.go b/pkg/client/sensor/clientset/versioned/fake/register.go index ff3a549616..9828ffe559 100644 --- a/pkg/client/sensor/clientset/versioned/fake/register.go +++ b/pkg/client/sensor/clientset/versioned/fake/register.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -37,14 +37,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/pkg/client/sensor/clientset/versioned/scheme/doc.go b/pkg/client/sensor/clientset/versioned/scheme/doc.go index 766973c41f..682557074b 100644 --- a/pkg/client/sensor/clientset/versioned/scheme/doc.go +++ b/pkg/client/sensor/clientset/versioned/scheme/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/sensor/clientset/versioned/scheme/register.go b/pkg/client/sensor/clientset/versioned/scheme/register.go index 046539db29..9924a565e5 100644 --- a/pkg/client/sensor/clientset/versioned/scheme/register.go +++ b/pkg/client/sensor/clientset/versioned/scheme/register.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -37,14 +37,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/doc.go b/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/doc.go index 46ca232c1e..e4d79ea8e9 100644 --- a/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/doc.go +++ b/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/fake/doc.go b/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/fake/doc.go index 9a152b87de..fbac3b1655 100644 --- a/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/fake/doc.go +++ b/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/fake/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/fake/fake_sensor.go b/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/fake/fake_sensor.go index b9df72ae33..c2c8f1a2b2 100644 --- a/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/fake/fake_sensor.go +++ b/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/fake/fake_sensor.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -24,7 +24,6 @@ import ( v1alpha1 "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -36,9 +35,9 @@ type FakeSensors struct { ns string } -var sensorsResource = schema.GroupVersionResource{Group: "argoproj.io", Version: "v1alpha1", Resource: "sensors"} +var sensorsResource = v1alpha1.SchemeGroupVersion.WithResource("sensors") -var sensorsKind = schema.GroupVersionKind{Group: "argoproj.io", Version: "v1alpha1", Kind: "Sensor"} +var sensorsKind = v1alpha1.SchemeGroupVersion.WithKind("Sensor") // Get takes name of the sensor, and returns the corresponding sensor object, and an error if there is any. 
func (c *FakeSensors) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Sensor, err error) { @@ -105,7 +104,7 @@ func (c *FakeSensors) Update(ctx context.Context, sensor *v1alpha1.Sensor, opts // Delete takes name of the sensor and deletes it. Returns an error if one occurs. func (c *FakeSensors) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { _, err := c.Fake. - Invokes(testing.NewDeleteAction(sensorsResource, c.ns, name), &v1alpha1.Sensor{}) + Invokes(testing.NewDeleteActionWithOptions(sensorsResource, c.ns, name, opts), &v1alpha1.Sensor{}) return err } diff --git a/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/fake/fake_sensor_client.go b/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/fake/fake_sensor_client.go index 29cb2ca5fc..eba3a92836 100644 --- a/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/fake/fake_sensor_client.go +++ b/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/fake/fake_sensor_client.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/generated_expansion.go b/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/generated_expansion.go index b49f34463a..97830d783c 100644 --- a/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/generated_expansion.go +++ b/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/generated_expansion.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/sensor.go b/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/sensor.go index 503a72c507..ef762fefed 100644 --- a/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/sensor.go +++ b/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/sensor.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/sensor_client.go b/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/sensor_client.go index a68d027cae..5196b8314a 100644 --- a/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/sensor_client.go +++ b/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1/sensor_client.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,6 +19,8 @@ limitations under the License. package v1alpha1 import ( + "net/http" + v1alpha1 "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" "github.com/argoproj/argo-events/pkg/client/sensor/clientset/versioned/scheme" rest "k8s.io/client-go/rest" @@ -39,12 +41,28 @@ func (c *ArgoprojV1alpha1Client) Sensors(namespace string) SensorInterface { } // NewForConfig creates a new ArgoprojV1alpha1Client for the given config. 
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). func NewForConfig(c *rest.Config) (*ArgoprojV1alpha1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err } - client, err := rest.RESTClientFor(&config) + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new ArgoprojV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ArgoprojV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) if err != nil { return nil, err } diff --git a/pkg/client/sensor/informers/externalversions/factory.go b/pkg/client/sensor/informers/externalversions/factory.go index f2a8a74adb..d741074ee6 100644 --- a/pkg/client/sensor/informers/externalversions/factory.go +++ b/pkg/client/sensor/informers/externalversions/factory.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -42,11 +42,17 @@ type sharedInformerFactory struct { lock sync.Mutex defaultResync time.Duration customResync map[reflect.Type]time.Duration + transform cache.TransformFunc informers map[reflect.Type]cache.SharedIndexInformer // startedInformers is used for tracking which informers have been started. // This allows Start() to be called multiple times safely. startedInformers map[reflect.Type]bool + // wg tracks how many goroutines were started. + wg sync.WaitGroup + // shuttingDown is true when Shutdown has been called. It may still be running + // because it needs to wait for goroutines. + shuttingDown bool } // WithCustomResyncConfig sets a custom resync period for the specified informer types. @@ -75,6 +81,14 @@ func WithNamespace(namespace string) SharedInformerOption { } } +// WithTransform sets a transform on all informers. +func WithTransform(transform cache.TransformFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.transform = transform + return factory + } +} + // NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { return NewSharedInformerFactoryWithOptions(client, defaultResync) @@ -107,20 +121,39 @@ func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResy return factory } -// Start initializes all requested informers. func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { f.lock.Lock() defer f.lock.Unlock() + if f.shuttingDown { + return + } + for informerType, informer := range f.informers { if !f.startedInformers[informerType] { - go informer.Run(stopCh) + f.wg.Add(1) + // We need a new variable in each loop iteration, + // otherwise the goroutine would use the loop variable + // and that keeps changing. + informer := informer + go func() { + defer f.wg.Done() + informer.Run(stopCh) + }() f.startedInformers[informerType] = true } } } -// WaitForCacheSync waits for all started informers' cache were synced. 
+func (f *sharedInformerFactory) Shutdown() { + f.lock.Lock() + f.shuttingDown = true + f.lock.Unlock() + + // Will return immediately if there is nothing to wait for. + f.wg.Wait() +} + func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { informers := func() map[reflect.Type]cache.SharedIndexInformer { f.lock.Lock() @@ -142,7 +175,7 @@ func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[ref return res } -// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// InformerFor returns the SharedIndexInformer for obj using an internal // client. func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { f.lock.Lock() @@ -160,6 +193,7 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal } informer = newFunc(f.client, resyncPeriod) + informer.SetTransform(f.transform) f.informers[informerType] = informer return informer @@ -167,11 +201,58 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal // SharedInformerFactory provides shared informers for resources in all known // API group versions. +// +// It is typically used like this: +// +// ctx, cancel := context.Background() +// defer cancel() +// factory := NewSharedInformerFactory(client, resyncPeriod) +// defer factory.WaitForStop() // Returns immediately if nothing was started. +// genericInformer := factory.ForResource(resource) +// typedInformer := factory.SomeAPIGroup().V1().SomeType() +// factory.Start(ctx.Done()) // Start processing these informers. +// synced := factory.WaitForCacheSync(ctx.Done()) +// for v, ok := range synced { +// if !ok { +// fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v) +// return +// } +// } +// +// // Creating informers can also be created after Start, but then +// // Start must be called again: +// anotherGenericInformer := factory.ForResource(resource) +// factory.Start(ctx.Done()) type SharedInformerFactory interface { internalinterfaces.SharedInformerFactory - ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + + // Start initializes all requested informers. They are handled in goroutines + // which run until the stop channel gets closed. + Start(stopCh <-chan struct{}) + + // Shutdown marks a factory as shutting down. At that point no new + // informers can be started anymore and Start will return without + // doing anything. + // + // In addition, Shutdown blocks until all goroutines have terminated. For that + // to happen, the close channel(s) that they were started with must be closed, + // either before Shutdown gets called or while it is waiting. + // + // Shutdown may be called multiple times, even concurrently. All such calls will + // block until all goroutines have terminated. + Shutdown() + + // WaitForCacheSync blocks until all started informers' caches were synced + // or the stop channel gets closed. WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + // ForResource gives generic access to a shared informer of the matching type. + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + + // InformerFor returns the SharedIndexInformer for obj using an internal + // client. 
+ InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer + Argoproj() sensor.Interface } diff --git a/pkg/client/sensor/informers/externalversions/generic.go b/pkg/client/sensor/informers/externalversions/generic.go index c0a5e22403..f384a25a6f 100644 --- a/pkg/client/sensor/informers/externalversions/generic.go +++ b/pkg/client/sensor/informers/externalversions/generic.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/sensor/informers/externalversions/internalinterfaces/factory_interfaces.go b/pkg/client/sensor/informers/externalversions/internalinterfaces/factory_interfaces.go index 89c64fd70c..e14626906f 100644 --- a/pkg/client/sensor/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/pkg/client/sensor/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/sensor/informers/externalversions/sensor/interface.go b/pkg/client/sensor/informers/externalversions/sensor/interface.go index d958b095ec..2bd1080ad2 100644 --- a/pkg/client/sensor/informers/externalversions/sensor/interface.go +++ b/pkg/client/sensor/informers/externalversions/sensor/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/sensor/informers/externalversions/sensor/v1alpha1/interface.go b/pkg/client/sensor/informers/externalversions/sensor/v1alpha1/interface.go index 724ebf827f..cccbbdf436 100644 --- a/pkg/client/sensor/informers/externalversions/sensor/v1alpha1/interface.go +++ b/pkg/client/sensor/informers/externalversions/sensor/v1alpha1/interface.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/sensor/informers/externalversions/sensor/v1alpha1/sensor.go b/pkg/client/sensor/informers/externalversions/sensor/v1alpha1/sensor.go index 7ecdd39744..68d35d7132 100644 --- a/pkg/client/sensor/informers/externalversions/sensor/v1alpha1/sensor.go +++ b/pkg/client/sensor/informers/externalversions/sensor/v1alpha1/sensor.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/pkg/client/sensor/listers/sensor/v1alpha1/expansion_generated.go b/pkg/client/sensor/listers/sensor/v1alpha1/expansion_generated.go index 5ce1a6153b..49964bfc2c 100644 --- a/pkg/client/sensor/listers/sensor/v1alpha1/expansion_generated.go +++ b/pkg/client/sensor/listers/sensor/v1alpha1/expansion_generated.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
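The regenerated fakes above now derive their GroupVersionResource and Kind from the scheme and record Delete calls with NewDeleteActionWithOptions, so DeleteOptions are preserved on the recorded action. A minimal sketch of exercising the sensor fake in a test; the test name, namespace, and grace period are illustrative:

    package sensors_test

    import (
        "context"
        "testing"

        "github.com/stretchr/testify/assert"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

        "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"
        "github.com/argoproj/argo-events/pkg/client/sensor/clientset/versioned/fake"
    )

    func TestFakeSensorDelete(t *testing.T) {
        sensor := &v1alpha1.Sensor{
            ObjectMeta: metav1.ObjectMeta{Namespace: "argo-events", Name: "test-sensor"},
        }
        client := fake.NewSimpleClientset(sensor)

        grace := int64(0)
        err := client.ArgoprojV1alpha1().Sensors("argo-events").
            Delete(context.Background(), "test-sensor", metav1.DeleteOptions{GracePeriodSeconds: &grace})
        assert.NoError(t, err)

        // The recorded action now carries the DeleteOptions instead of dropping them.
        actions := client.Actions()
        assert.Len(t, actions, 1)
        assert.Equal(t, "delete", actions[0].GetVerb())
    }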
diff --git a/pkg/client/sensor/listers/sensor/v1alpha1/sensor.go b/pkg/client/sensor/listers/sensor/v1alpha1/sensor.go index 2b376a6b1e..a3972faf47 100644 --- a/pkg/client/sensor/listers/sensor/v1alpha1/sensor.go +++ b/pkg/client/sensor/listers/sensor/v1alpha1/sensor.go @@ -1,5 +1,5 @@ /* -Copyright 2020 BlackRock, Inc. +Copyright 2021 BlackRock, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/sensors/artifacts/configmap.go b/sensors/artifacts/configmap.go index a93c7b1046..4605942a5b 100644 --- a/sensors/artifacts/configmap.go +++ b/sensors/artifacts/configmap.go @@ -5,7 +5,7 @@ import ( corev1 "k8s.io/api/core/v1" ) -// ConfigMapReader implements the ArtifactReader interface for k8 configmap +// ConfigMapReader implements the ArtifactReader interface for K8s configmap type ConfigMapReader struct { configmapArtifact *corev1.ConfigMapKeySelector } diff --git a/sensors/artifacts/file.go b/sensors/artifacts/file.go index 484f36dcbd..db1831759e 100644 --- a/sensors/artifacts/file.go +++ b/sensors/artifacts/file.go @@ -17,8 +17,8 @@ limitations under the License. package artifacts import ( - "errors" - "io/ioutil" + "fmt" + "os" "github.com/argoproj/argo-events/common/logging" "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" @@ -33,13 +33,13 @@ type FileReader struct { func NewFileReader(fileArtifact *v1alpha1.FileArtifact) (ArtifactReader, error) { // This should never happen! if fileArtifact == nil { - return nil, errors.New("FileArtifact cannot be empty") + return nil, fmt.Errorf("FileArtifact cannot be empty") } return &FileReader{fileArtifact}, nil } func (reader *FileReader) Read() ([]byte, error) { - content, err := ioutil.ReadFile(reader.fileArtifact.Path) + content, err := os.ReadFile(reader.fileArtifact.Path) if err != nil { return nil, err } diff --git a/sensors/artifacts/file_test.go b/sensors/artifacts/file_test.go index 5f33995890..e940b87a89 100644 --- a/sensors/artifacts/file_test.go +++ b/sensors/artifacts/file_test.go @@ -17,7 +17,6 @@ limitations under the License. 
package artifacts import ( - "io/ioutil" "os" "testing" @@ -28,7 +27,7 @@ import ( func TestFileReader(t *testing.T) { content := []byte("temp content") - tmpfile, err := ioutil.TempFile("", "argo-events-temp") + tmpfile, err := os.CreateTemp("", "argo-events-temp") if err != nil { t.Fatal(err) } diff --git a/sensors/artifacts/git.go b/sensors/artifacts/git.go index 1218c201b0..edabe560d2 100644 --- a/sensors/artifacts/git.go +++ b/sensors/artifacts/git.go @@ -18,8 +18,9 @@ package artifacts import ( "fmt" - "io/ioutil" "os" + "path" + "strings" "github.com/go-git/go-git/v5" "github.com/go-git/go-git/v5/config" @@ -27,7 +28,6 @@ import ( "github.com/go-git/go-git/v5/plumbing/transport" "github.com/go-git/go-git/v5/plumbing/transport/http" go_git_ssh "github.com/go-git/go-git/v5/plumbing/transport/ssh" - "github.com/pkg/errors" "golang.org/x/crypto/ssh" "github.com/argoproj/argo-events/common" @@ -44,6 +44,8 @@ var ( "refs/*:refs/*", "HEAD:refs/heads/HEAD", } + + notAllowedInPath = []string{"..", "~", "\\"} ) type GitArtifactReader struct { @@ -52,6 +54,18 @@ type GitArtifactReader struct { // NewGitReader returns a new git reader func NewGitReader(gitArtifact *v1alpha1.GitArtifact) (*GitArtifactReader, error) { + if gitArtifact == nil { + return nil, fmt.Errorf("nil git artifact") + } + for _, na := range notAllowedInPath { + if strings.Contains(gitArtifact.FilePath, na) { + return nil, fmt.Errorf("%q is not allowed in the filePath", na) + } + if strings.Contains(gitArtifact.CloneDirectory, na) { + return nil, fmt.Errorf("%q is not allowed in the cloneDirectory", na) + } + } + return &GitArtifactReader{ artifact: gitArtifact, }, nil @@ -64,8 +78,8 @@ func (g *GitArtifactReader) getRemote() string { return DefaultRemote } -func getSSHKeyAuth(sshKeyFile string) (transport.AuthMethod, error) { - sshKey, err := ioutil.ReadFile(sshKeyFile) +func getSSHKeyAuth(sshKeyFile string, insecureIgnoreHostKey bool) (transport.AuthMethod, error) { + sshKey, err := os.ReadFile(sshKeyFile) if err != nil { return nil, fmt.Errorf("failed to read ssh key file. err: %+v", err) } @@ -74,7 +88,9 @@ func getSSHKeyAuth(sshKeyFile string) (transport.AuthMethod, error) { return nil, fmt.Errorf("failed to parse ssh key. 
err: %+v", err) } auth := &go_git_ssh.PublicKeys{User: "git", Signer: signer} - auth.HostKeyCallback = ssh.InsecureIgnoreHostKey() + if insecureIgnoreHostKey { + auth.HostKeyCallback = ssh.InsecureIgnoreHostKey() + } return auth, nil } @@ -82,11 +98,11 @@ func (g *GitArtifactReader) getGitAuth() (transport.AuthMethod, error) { if g.artifact.Creds != nil { username, err := common.GetSecretFromVolume(g.artifact.Creds.Username) if err != nil { - return nil, errors.Wrap(err, "failed to retrieve username") + return nil, fmt.Errorf("failed to retrieve username, %w", err) } password, err := common.GetSecretFromVolume(g.artifact.Creds.Password) if err != nil { - return nil, errors.Wrap(err, "failed to retrieve password") + return nil, fmt.Errorf("failed to retrieve password, %w", err) } return &http.BasicAuth{ Username: username, @@ -96,13 +112,9 @@ func (g *GitArtifactReader) getGitAuth() (transport.AuthMethod, error) { if g.artifact.SSHKeySecret != nil { sshKeyPath, err := common.GetSecretVolumePath(g.artifact.SSHKeySecret) if err != nil { - return nil, errors.Wrap(err, "failed to get SSH key from mounted volume") + return nil, fmt.Errorf("failed to get SSH key from mounted volume, %w", err) } - return getSSHKeyAuth(sshKeyPath) - } - // DEPRECATED - if g.artifact.DeprecatedSSHKeyPath != "" { - return getSSHKeyAuth(g.artifact.DeprecatedSSHKeyPath) + return getSSHKeyAuth(sshKeyPath, g.artifact.InsecureIgnoreHostKey) } return nil, nil } @@ -119,7 +131,7 @@ func (g *GitArtifactReader) readFromRepository(r *git.Repository, dir string) ([ URLs: g.artifact.Remote.URLS, }) if err != nil { - return nil, fmt.Errorf("failed to create remote. err: %+v", err) + return nil, fmt.Errorf("failed to create remote. err: %w", err) } fetchOptions := &git.FetchOptions{ @@ -132,13 +144,13 @@ func (g *GitArtifactReader) readFromRepository(r *git.Repository, dir string) ([ } if err := r.Fetch(fetchOptions); err != nil { - return nil, fmt.Errorf("failed to fetch remote %s. err: %+v", g.artifact.Remote.Name, err) + return nil, fmt.Errorf("failed to fetch remote %s. err: %w", g.artifact.Remote.Name, err) } } w, err := r.Worktree() if err != nil { - return nil, fmt.Errorf("failed to get working tree. err: %+v", err) + return nil, fmt.Errorf("failed to get working tree. err: %w", err) } fetchOptions := &git.FetchOptions{ @@ -180,8 +192,16 @@ func (g *GitArtifactReader) readFromRepository(r *git.Repository, dir string) ([ return nil, fmt.Errorf("failed to pull latest updates. 
err: %+v", err) } } - - return ioutil.ReadFile(fmt.Sprintf("%s/%s", dir, g.artifact.FilePath)) + filePath := fmt.Sprintf("%s/%s", dir, g.artifact.FilePath) + // symbol link is not allowed due to security concern + isSymbolLink, err := isSymbolLink(filePath) + if err != nil { + return nil, err + } + if isSymbolLink { + return nil, fmt.Errorf("%q is a symbol link which is not allowed", g.artifact.FilePath) + } + return os.ReadFile(filePath) } func (g *GitArtifactReader) getBranchOrTag() *git.CheckoutOptions { @@ -205,9 +225,9 @@ func (g *GitArtifactReader) getBranchOrTag() *git.CheckoutOptions { func (g *GitArtifactReader) Read() ([]byte, error) { cloneDir := g.artifact.CloneDirectory if cloneDir == "" { - tempDir, err := ioutil.TempDir("", "git-tmp") + tempDir, err := os.MkdirTemp("", "git-tmp") if err != nil { - return nil, errors.Wrap(err, "failed to create a temp file to clone the repository") + return nil, fmt.Errorf("failed to create a temp file to clone the repository, %w", err) } defer os.Remove(tempDir) cloneDir = tempDir @@ -216,7 +236,7 @@ func (g *GitArtifactReader) Read() ([]byte, error) { r, err := git.PlainOpen(cloneDir) if err != nil { if err != git.ErrRepositoryNotExists { - return nil, fmt.Errorf("failed to open repository. err: %+v", err) + return nil, fmt.Errorf("failed to open repository. err: %w", err) } cloneOpt := &git.CloneOptions{ @@ -245,3 +265,14 @@ func (g *GitArtifactReader) Read() ([]byte, error) { } return g.readFromRepository(r, cloneDir) } + +func isSymbolLink(filepath string) (bool, error) { + fi, err := os.Lstat(path.Clean(filepath)) + if err != nil { + return false, err + } + if fi.Mode()&os.ModeSymlink != 0 { + return true, nil + } + return false, nil +} diff --git a/sensors/artifacts/git_test.go b/sensors/artifacts/git_test.go index 4e80e43d60..1872182b34 100644 --- a/sensors/artifacts/git_test.go +++ b/sensors/artifacts/git_test.go @@ -19,7 +19,7 @@ package artifacts import ( "testing" - "github.com/smartystreets/goconvey/convey" + "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" @@ -46,35 +46,47 @@ var gar = &GitArtifactReader{ } func TestNewGitReader(t *testing.T) { - convey.Convey("Given configuration, get new git reader", t, func() { + t.Run("Given configuration, get new git reader", func(t *testing.T) { reader, err := NewGitReader(&v1alpha1.GitArtifact{}) - convey.So(err, convey.ShouldBeNil) - convey.So(reader, convey.ShouldNotBeNil) + assert.NoError(t, err) + assert.NotNil(t, reader) + }) + + t.Run("bad clone dir", func(t *testing.T) { + _, err := NewGitReader(&v1alpha1.GitArtifact{CloneDirectory: "/abc/../opt"}) + assert.Error(t, err) + assert.Contains(t, err.Error(), "not allowed") + }) + + t.Run("bad file path", func(t *testing.T) { + _, err := NewGitReader(&v1alpha1.GitArtifact{FilePath: "abc/efg/../../../root"}) + assert.Error(t, err) + assert.Contains(t, err.Error(), "not allowed") }) } func TestGetRemote(t *testing.T) { - convey.Convey("Test git remote", t, func() { + t.Run("Test git remote", func(t *testing.T) { remote := gar.getRemote() - convey.So(remote, convey.ShouldEqual, DefaultRemote) + assert.Equal(t, DefaultRemote, remote) }) } func TestGetBranchOrTag(t *testing.T) { - convey.Convey("Given a git minio, get the branch or tag", t, func() { + t.Run("Given a git minio, get the branch or tag", func(t *testing.T) { br := gar.getBranchOrTag() - convey.So(br.Branch, convey.ShouldEqual, "refs/heads/master") + assert.Equal(t, "refs/heads/master", br.Branch.String()) 
gar.artifact.Branch = "br" br = gar.getBranchOrTag() - convey.So(br.Branch, convey.ShouldNotEqual, "refs/heads/master") + assert.NotEqual(t, "refs/heads/master", br.Branch.String()) gar.artifact.Tag = "t" tag := gar.getBranchOrTag() - convey.So(tag.Branch, convey.ShouldNotEqual, "refs/heads/master") + assert.NotEqual(t, "refs/heads/master", tag.Branch.String()) }) - convey.Convey("Given a git minio with a specific ref, get the ref", t, func() { + t.Run("Given a git minio with a specific ref, get the ref", func(t *testing.T) { gar.artifact.Ref = "refs/something/weird/or/specific" br := gar.getBranchOrTag() - convey.So(br.Branch, convey.ShouldEqual, "refs/something/weird/or/specific") + assert.Equal(t, "refs/something/weird/or/specific", br.Branch.String()) }) } diff --git a/sensors/artifacts/inline.go b/sensors/artifacts/inline.go index 1c6e426203..92353c450e 100644 --- a/sensors/artifacts/inline.go +++ b/sensors/artifacts/inline.go @@ -17,7 +17,7 @@ limitations under the License. package artifacts import ( - "errors" + "fmt" "github.com/argoproj/argo-events/common/logging" ) @@ -31,7 +31,7 @@ type InlineReader struct { func NewInlineReader(inlineArtifact *string) (ArtifactReader, error) { // This should never happen! if inlineArtifact == nil { - return nil, errors.New("InlineArtifact does not exist") + return nil, fmt.Errorf("InlineArtifact does not exist") } return &InlineReader{inlineArtifact}, nil } diff --git a/sensors/artifacts/resource.go b/sensors/artifacts/resource.go index 3619bcd477..3ca6aaafb5 100644 --- a/sensors/artifacts/resource.go +++ b/sensors/artifacts/resource.go @@ -18,7 +18,7 @@ package artifacts import ( "encoding/json" - "errors" + "fmt" "github.com/ghodss/yaml" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -35,7 +35,7 @@ type ResourceReader struct { // NewResourceReader creates a new ArtifactReader for resource func NewResourceReader(resourceArtifact *common.Resource) (ArtifactReader, error) { if resourceArtifact == nil { - return nil, errors.New("ResourceArtifact does not exist") + return nil, fmt.Errorf("ResourceArtifact does not exist") } data, err := json.Marshal(resourceArtifact) if err != nil { diff --git a/sensors/artifacts/s3.go b/sensors/artifacts/s3.go index 7bf7eff60c..f02a745bd0 100644 --- a/sensors/artifacts/s3.go +++ b/sensors/artifacts/s3.go @@ -17,10 +17,12 @@ limitations under the License. 
package artifacts import ( + "context" "fmt" - "io/ioutil" + "io" - "github.com/minio/minio-go" + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" "github.com/argoproj/argo-events/common/logging" apicommon "github.com/argoproj/argo-events/pkg/apis/common" @@ -49,7 +51,7 @@ func NewS3Reader(s3 *apicommon.S3Artifact, creds *Credentials) (ArtifactReader, func (reader *S3Reader) Read() ([]byte, error) { log := logging.NewArgoEventsLogger() log.Debugf("reading s3Artifact from %s/%s", reader.s3.Bucket.Name, reader.s3.Bucket.Key) - obj, err := reader.client.GetObject(reader.s3.Bucket.Name, reader.s3.Bucket.Key, minio.GetObjectOptions{}) + obj, err := reader.client.GetObject(context.Background(), reader.s3.Bucket.Name, reader.s3.Bucket.Key, minio.GetObjectOptions{}) if err != nil { return nil, err } @@ -59,7 +61,7 @@ func (reader *S3Reader) Read() ([]byte, error) { } }() - b, err := ioutil.ReadAll(obj) + b, err := io.ReadAll(io.LimitReader(obj, 1024*1224)) if err != nil { return nil, err } @@ -71,9 +73,11 @@ func NewMinioClient(s3 *apicommon.S3Artifact, creds Credentials) (*minio.Client, var minioClient *minio.Client var err error if s3.Region != "" { - minioClient, err = minio.NewWithRegion(s3.Endpoint, creds.accessKey, creds.secretKey, !s3.Insecure, s3.Region) + minioClient, err = minio.New(s3.Endpoint, &minio.Options{ + Creds: credentials.NewStaticV4(creds.accessKey, creds.secretKey, ""), Secure: !s3.Insecure, Region: s3.Region}) } else { - minioClient, err = minio.New(s3.Endpoint, creds.accessKey, creds.secretKey, !s3.Insecure) + minioClient, err = minio.New(s3.Endpoint, &minio.Options{ + Creds: credentials.NewStaticV4(creds.accessKey, creds.secretKey, ""), Secure: !s3.Insecure}) } if err != nil { return nil, err diff --git a/sensors/artifacts/store.go b/sensors/artifacts/store.go index d41aa0a027..7fb80bb453 100644 --- a/sensors/artifacts/store.go +++ b/sensors/artifacts/store.go @@ -21,7 +21,6 @@ import ( "strings" "github.com/ghodss/yaml" - "github.com/pkg/errors" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "github.com/argoproj/argo-events/common" @@ -37,12 +36,12 @@ type ArtifactReader interface { func FetchArtifact(reader ArtifactReader) (*unstructured.Unstructured, error) { var obj []byte - if err := common.Connect(&common.DefaultBackoff, func() error { + if err := common.DoWithRetry(&common.DefaultBackoff, func() error { var e error obj, e = reader.Read() return e }); err != nil { - return nil, errors.Wrap(err, "failed to fetch artifact") + return nil, fmt.Errorf("failed to fetch artifact, %w", err) } return decodeAndUnstructure(obj) } @@ -57,11 +56,11 @@ func GetCredentials(art *v1alpha1.ArtifactLocation) (*Credentials, error) { if art.S3 != nil { accessKey, err := common.GetSecretFromVolume(art.S3.AccessKey) if err != nil { - return nil, errors.Wrap(err, "failed to retrieve accessKey") + return nil, fmt.Errorf("failed to retrieve accessKey, %w", err) } secretKey, err := common.GetSecretFromVolume(art.S3.SecretKey) if err != nil { - return nil, errors.Wrap(err, "failed to retrieve secretKey") + return nil, fmt.Errorf("failed to retrieve secretKey, %w", err) } return &Credentials{ accessKey: strings.TrimSpace(accessKey), diff --git a/sensors/artifacts/store_test.go b/sensors/artifacts/store_test.go index 06bb3c0eac..baf66f32aa 100644 --- a/sensors/artifacts/store_test.go +++ b/sensors/artifacts/store_test.go @@ -18,7 +18,7 @@ package artifacts import ( "context" - "io/ioutil" + "os" "testing" 
"github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" @@ -74,7 +74,7 @@ func TestGetArtifactReader(t *testing.T) { } func TestDecodeSensor(t *testing.T) { - b, err := ioutil.ReadFile("../../examples/sensors/multi-trigger-sensor.yaml") + b, err := os.ReadFile("../../examples/sensors/multi-trigger-sensor.yaml") assert.Nil(t, err) _, err = decodeAndUnstructure(b) assert.Nil(t, err) diff --git a/sensors/artifacts/url.go b/sensors/artifacts/url.go index d854cf9d13..3764b14209 100644 --- a/sensors/artifacts/url.go +++ b/sensors/artifacts/url.go @@ -2,11 +2,10 @@ package artifacts import ( "crypto/tls" - "io/ioutil" + "fmt" + "io" "net/http" - "github.com/pkg/errors" - "github.com/argoproj/argo-events/common/logging" "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" ) @@ -19,7 +18,7 @@ type URLReader struct { // NewURLReader creates a new ArtifactReader for workflows at URL endpoints. func NewURLReader(urlArtifact *v1alpha1.URLArtifact) (ArtifactReader, error) { if urlArtifact == nil { - return nil, errors.New("URLArtifact cannot be empty") + return nil, fmt.Errorf("URLArtifact cannot be empty") } return &URLReader{urlArtifact}, nil } @@ -42,10 +41,10 @@ func (reader *URLReader) Read() ([]byte, error) { if resp.StatusCode != http.StatusOK { log.Warnf("failed to read %s. status code: %d", reader.urlArtifact.Path, resp.StatusCode) - return nil, errors.Errorf("status code %v", resp.StatusCode) + return nil, fmt.Errorf("status code %v", resp.StatusCode) } - content, err := ioutil.ReadAll(resp.Body) + content, err := io.ReadAll(io.LimitReader(resp.Body, 512*1024)) if err != nil { log.Warnf("failed to read url body for %s: %s", reader.urlArtifact.Path, err) return nil, err diff --git a/sensors/cmd/start.go b/sensors/cmd/start.go index da6be076e9..28b4521115 100644 --- a/sensors/cmd/start.go +++ b/sensors/cmd/start.go @@ -12,6 +12,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager/signals" argoevents "github.com/argoproj/argo-events" + "github.com/argoproj/argo-events/codefresh" "github.com/argoproj/argo-events/common" "github.com/argoproj/argo-events/common/logging" "github.com/argoproj/argo-events/metrics" @@ -52,7 +53,14 @@ func Start() { logger.Fatalw("failed to unmarshal bus config object", zap.Error(err)) } } - + if busConfig.NATS != nil { + for _, trigger := range sensor.Spec.Triggers { + if trigger.AtLeastOnce { + logger.Warn("ignoring atLeastOnce when using NATS") + trigger.AtLeastOnce = false + } + } + } ebSubject, defined := os.LookupEnv(common.EnvVarEventBusSubject) if !defined { logger.Fatalf("required environment variable '%s' not defined", common.EnvVarEventBusSubject) @@ -66,12 +74,21 @@ func Start() { dynamicClient := dynamic.NewForConfigOrDie(restConfig) logger = logger.With("sensorName", sensor.Name) + for name, value := range sensor.Spec.LoggingFields { + logger.With(name, value) + } + ctx := logging.WithLogger(signals.SetupSignalHandler(), logger) m := metrics.NewMetrics(sensor.Namespace) go m.Run(ctx, fmt.Sprintf(":%d", common.SensorMetricsPort)) + cfClient, err := codefresh.NewClient(ctx, sensor.Namespace) + if err != nil { + logger.Fatalw("unable to initialise Codefresh Client", zap.Error(err)) + } + logger.Infow("starting sensor server", "version", argoevents.GetVersion()) - sensorExecutionCtx := sensors.NewSensorContext(kubeClient, dynamicClient, sensor, busConfig, ebSubject, hostname, m) + sensorExecutionCtx := sensors.NewSensorContext(kubeClient, dynamicClient, sensor, busConfig, ebSubject, hostname, m, cfClient) if err := sensorExecutionCtx.Start(ctx); 
err != nil { logger.Fatalw("failed to listen to events", zap.Error(err)) } diff --git a/sensors/common/util.go b/sensors/common/util.go index c8d5c2e973..874b842b11 100644 --- a/sensors/common/util.go +++ b/sensors/common/util.go @@ -26,11 +26,15 @@ func ApplyEventLabels(labels map[string]string, events map[string]*v1alpha1.Even return nil } -func ApplySensorUniquenessLabels(labels map[string]string, sensor *v1alpha1.Sensor) { +func ApplySensorLabels(labels map[string]string, sensor *v1alpha1.Sensor) { sensorGVK := sensor.GroupVersionKind() labels["events.argoproj.io/sensor-group"] = sensorGVK.Group labels["events.argoproj.io/sensor-version"] = sensorGVK.Version labels["events.argoproj.io/sensor-kind"] = sensorGVK.Kind labels["events.argoproj.io/sensor-namespace"] = sensor.Namespace labels["events.argoproj.io/sensor"] = sensor.Name -} \ No newline at end of file + + if sensor.Labels != nil && sensor.Labels["app.kubernetes.io/instance"] != "" { + labels["events.argoproj.io/sensor-app-name"] = sensor.Labels["app.kubernetes.io/instance"] + } +} diff --git a/sensors/context.go b/sensors/context.go index f296ba6f7a..8865f6a480 100644 --- a/sensors/context.go +++ b/sensors/context.go @@ -21,14 +21,18 @@ import ( "time" eventhubs "github.com/Azure/azure-event-hubs-go/v3" - "github.com/Shopify/sarama" + servicebus "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus" + "github.com/IBM/sarama" "github.com/apache/openwhisk-client-go/whisk" + "github.com/apache/pulsar-client-go/pulsar" "github.com/aws/aws-sdk-go/service/lambda" - natslib "github.com/nats-io/go-nats" + natslib "github.com/nats-io/nats.go" "google.golang.org/grpc" "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" + "github.com/argoproj/argo-events/codefresh" + "github.com/argoproj/argo-events/common" sensormetrics "github.com/argoproj/argo-events/metrics" eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" @@ -49,26 +53,31 @@ type SensorContext struct { hostname string // httpClients holds the reference to HTTP clients for HTTP triggers. - httpClients map[string]*http.Client + httpClients common.StringKeyedMap[*http.Client] // customTriggerClients holds the references to the gRPC clients for the custom trigger servers - customTriggerClients map[string]*grpc.ClientConn + customTriggerClients common.StringKeyedMap[*grpc.ClientConn] // http client to send slack messages. slackHTTPClient *http.Client // kafkaProducers holds references to the active kafka producers - kafkaProducers map[string]sarama.AsyncProducer + kafkaProducers common.StringKeyedMap[sarama.AsyncProducer] + // pulsarProducers holds references to the active pulsar producers + pulsarProducers common.StringKeyedMap[pulsar.Producer] // natsConnections holds the references to the active nats connections. - natsConnections map[string]*natslib.Conn + natsConnections common.StringKeyedMap[*natslib.Conn] // awsLambdaClients holds the references to active AWS Lambda clients. - awsLambdaClients map[string]*lambda.Lambda + awsLambdaClients common.StringKeyedMap[*lambda.Lambda] // openwhiskClients holds the references to active OpenWhisk clients. - openwhiskClients map[string]*whisk.Client + openwhiskClients common.StringKeyedMap[*whisk.Client] // azureEventHubsClients holds the references to active Azure Event Hub clients. 
- azureEventHubsClients map[string]*eventhubs.Hub - metrics *sensormetrics.Metrics + azureEventHubsClients common.StringKeyedMap[*eventhubs.Hub] + // azureServiceBusClients holds the references to active Azure Service Bus clients. + azureServiceBusClients common.StringKeyedMap[*servicebus.Sender] + metrics *sensormetrics.Metrics + cfClient *codefresh.Client } // NewSensorContext returns a new sensor execution context. -func NewSensorContext(kubeClient kubernetes.Interface, dynamicClient dynamic.Interface, sensor *v1alpha1.Sensor, eventBusConfig *eventbusv1alpha1.BusConfig, eventBusSubject, hostname string, metrics *sensormetrics.Metrics) *SensorContext { +func NewSensorContext(kubeClient kubernetes.Interface, dynamicClient dynamic.Interface, sensor *v1alpha1.Sensor, eventBusConfig *eventbusv1alpha1.BusConfig, eventBusSubject, hostname string, metrics *sensormetrics.Metrics, cfClient *codefresh.Client) *SensorContext { return &SensorContext{ kubeClient: kubeClient, dynamicClient: dynamicClient, @@ -76,16 +85,19 @@ func NewSensorContext(kubeClient kubernetes.Interface, dynamicClient dynamic.Int eventBusConfig: eventBusConfig, eventBusSubject: eventBusSubject, hostname: hostname, - httpClients: make(map[string]*http.Client), - customTriggerClients: make(map[string]*grpc.ClientConn), + httpClients: common.NewStringKeyedMap[*http.Client](), + customTriggerClients: common.NewStringKeyedMap[*grpc.ClientConn](), slackHTTPClient: &http.Client{ Timeout: time.Minute * 5, }, - kafkaProducers: make(map[string]sarama.AsyncProducer), - natsConnections: make(map[string]*natslib.Conn), - awsLambdaClients: make(map[string]*lambda.Lambda), - openwhiskClients: make(map[string]*whisk.Client), - azureEventHubsClients: make(map[string]*eventhubs.Hub), - metrics: metrics, + kafkaProducers: common.NewStringKeyedMap[sarama.AsyncProducer](), + pulsarProducers: common.NewStringKeyedMap[pulsar.Producer](), + natsConnections: common.NewStringKeyedMap[*natslib.Conn](), + awsLambdaClients: common.NewStringKeyedMap[*lambda.Lambda](), + openwhiskClients: common.NewStringKeyedMap[*whisk.Client](), + azureEventHubsClients: common.NewStringKeyedMap[*eventhubs.Hub](), + azureServiceBusClients: common.NewStringKeyedMap[*servicebus.Sender](), + metrics: metrics, + cfClient: cfClient, } } diff --git a/sensors/dependencies/filter.go b/sensors/dependencies/filter.go index 1696a4562a..d96341cdd3 100644 --- a/sensors/dependencies/filter.go +++ b/sensors/dependencies/filter.go @@ -19,9 +19,11 @@ package dependencies import ( "bytes" "encoding/json" + "errors" "fmt" "regexp" "strconv" + "strings" "text/template" "time" @@ -30,153 +32,247 @@ import ( "github.com/argoproj/argo-events/common" "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" "github.com/tidwall/gjson" + lua "github.com/yuin/gopher-lua" +) + +const ( + errMsgListSeparator = " / " + errMsgTemplate = "%s filter error (%s)" + multiErrMsgTemplate = "%s filter errors [%s]" ) // Filter filters the event with dependency's defined filters -func Filter(event *v1alpha1.Event, filters *v1alpha1.EventDependencyFilter) (bool, error) { - if filters == nil { +func Filter(event *v1alpha1.Event, filter *v1alpha1.EventDependencyFilter, filtersLogicalOperator v1alpha1.LogicalOperator) (bool, error) { + if filter == nil { return true, nil } - ok, err := filterEvent(filters, event) + + ok, err := filterEvent(filter, filtersLogicalOperator, event) if err != nil { return false, err } + return ok, nil } -// apply the filters to an Event -func filterEvent(filter 
*v1alpha1.EventDependencyFilter, event *v1alpha1.Event) (bool, error) { - dataFilter, err := filterData(filter.Data, event) - if err != nil { - return false, err +// filterEvent applies the filters to an Event +func filterEvent(filter *v1alpha1.EventDependencyFilter, operator v1alpha1.LogicalOperator, event *v1alpha1.Event) (bool, error) { + var errMessages []string + if operator == v1alpha1.OrLogicalOperator { + errMessages = make([]string, 0) } - timeFilter, err := filterTime(filter.Time, event.Context.Time.Time) - if err != nil { - return false, err + + exprFilter, exprErr := filterExpr(filter.Exprs, filter.ExprLogicalOperator, event) + if exprErr != nil { + if operator != v1alpha1.OrLogicalOperator { + return false, exprErr + } + errMessages = append(errMessages, exprErr.Error()) } - ctxFilter := filterContext(filter.Context, event.Context) - exprFilter, err := filterExpr(filter.Exprs, event) - if err != nil { - return false, err + + dataFilter, dataErr := filterData(filter.Data, filter.DataLogicalOperator, event) + if dataErr != nil { + if operator != v1alpha1.OrLogicalOperator { + return false, dataErr + } + errMessages = append(errMessages, dataErr.Error()) } - return timeFilter && ctxFilter && dataFilter && exprFilter, nil -} + ctxFilter := filterContext(filter.Context, event.Context) -// filterTime checks the eventTime falls into time range specified by the timeFilter. -// Start is inclusive, and Stop is exclusive. -// -// if Start < Stop: eventTime must be in [Start, Stop) -// -// 0:00 Start Stop 0:00 -// ├───────────●───────────○───────────┤ -// └─── OK ────┘ -// -// if Stop < Start: eventTime must be in [Start, Stop@Next day) -// -// this is equivalent to: eventTime must be in [0:00, Stop) or [Start, 0:00@Next day) -// -// 0:00 Start 0:00 Stop 0:00 -// ├───────────○───────────●───────────┼───────────○───────────●───────────┤ -// └───────── OK ──────────┘ -// -// 0:00 Stop Start 0:00 -// ●───────────○───────────●───────────○ -// └─── OK ────┘ └─── OK ────┘ -func filterTime(timeFilter *v1alpha1.TimeFilter, eventTime time.Time) (bool, error) { - if timeFilter == nil { - return true, nil + timeFilter, timeErr := filterTime(filter.Time, event.Context.Time.Time) + if timeErr != nil { + if operator != v1alpha1.OrLogicalOperator { + return false, timeErr + } + errMessages = append(errMessages, timeErr.Error()) } - // Parse start and stop - startTime, err := common.ParseTime(timeFilter.Start, eventTime) - if err != nil { - return false, err - } - stopTime, err := common.ParseTime(timeFilter.Stop, eventTime) + scriptFilter, err := filterScript(filter.Script, event) if err != nil { return false, err } - // Filtering logic - if startTime.Before(stopTime) { - return (eventTime.After(startTime) || eventTime.Equal(startTime)) && eventTime.Before(stopTime), nil - } else { - return (eventTime.After(startTime) || eventTime.Equal(startTime)) || eventTime.Before(stopTime), nil + if operator == v1alpha1.OrLogicalOperator { + pass := (filter.Exprs != nil && exprFilter) || + (filter.Data != nil && dataFilter) || + (filter.Context != nil && ctxFilter) || + (filter.Time != nil && timeFilter) || + (filter.Script != "" && scriptFilter) + + if len(errMessages) > 0 { + return pass, errors.New(strings.Join(errMessages, errMsgListSeparator)) + } + return pass, nil } + return exprFilter && dataFilter && ctxFilter && timeFilter && scriptFilter, nil } -// applyContextFilter checks the expected EventContext against the actual EventContext -// values are only enforced if they are non-zero values -// map types 
check that the expected map is a subset of the actual map -func filterContext(expected *v1alpha1.EventContext, actual *v1alpha1.EventContext) bool { - if expected == nil { - return true +// filterExpr applies expression based filters against event data +// expression evaluation is based on https://github.com/Knetic/govaluate +// when operator is v1alpha1.OrLogicalOperator, the filters are OR'ed together: any single passing filter is enough +func filterExpr(filters []v1alpha1.ExprFilter, operator v1alpha1.LogicalOperator, event *v1alpha1.Event) (bool, error) { + if filters == nil { + return true, nil } - if actual == nil { - return false + if event == nil { + return false, fmt.Errorf(errMsgTemplate, "expr", "nil event") } - res := true - if expected.Type != "" { - res = res && expected.Type == actual.Type + payload := event.Data + if payload == nil { + return true, nil } - if expected.Subject != "" { - res = res && expected.Subject == actual.Subject + if !gjson.Valid(string(payload)) { + return false, fmt.Errorf(errMsgTemplate, "expr", "event data not valid JSON") } - if expected.Source != "" { - res = res && expected.Source == actual.Source + + var errMessages []string + if operator == v1alpha1.OrLogicalOperator { + errMessages = make([]string, 0) } - if expected.DataContentType != "" { - res = res && expected.DataContentType == actual.DataContentType +filterExpr: + for _, filter := range filters { + parameters := map[string]interface{}{} + for _, field := range filter.Fields { + pathResult := gjson.GetBytes(payload, field.Path) + if !pathResult.Exists() { + errMsg := "path '%s' does not exist" + if operator == v1alpha1.OrLogicalOperator { + errMessages = append(errMessages, fmt.Sprintf(errMsg, field.Path)) + continue filterExpr + } else { + return false, fmt.Errorf(errMsgTemplate, "expr", fmt.Sprintf(errMsg, field.Path)) + } + } + parameters[field.Name] = pathResult.Value() + } + + if len(parameters) == 0 { + continue + } + + expr, exprErr := govaluate.NewEvaluableExpression(filter.Expr) + if exprErr != nil { + if operator == v1alpha1.OrLogicalOperator { + errMessages = append(errMessages, exprErr.Error()) + continue + } else { + return false, fmt.Errorf(errMsgTemplate, "expr", exprErr.Error()) + } + } + + result, resErr := expr.Evaluate(parameters) + if resErr != nil { + if operator == v1alpha1.OrLogicalOperator { + errMessages = append(errMessages, resErr.Error()) + continue + } else { + return false, fmt.Errorf(errMsgTemplate, "expr", resErr.Error()) + } + } + + if result == true { + if operator == v1alpha1.OrLogicalOperator { + return true, nil + } + } else { + if operator != v1alpha1.OrLogicalOperator { + return false, nil + } + } + } + + if operator == v1alpha1.OrLogicalOperator { + if len(errMessages) > 0 { + return false, fmt.Errorf(multiErrMsgTemplate, "expr", strings.Join(errMessages, errMsgListSeparator)) + } + return false, nil + } else { + return true, nil } - return res } -// applyDataFilter runs the dataFilter against the Event's data +// filterData runs the dataFilter against the Event's data // returns (true, nil) when data passes filters, false otherwise -func filterData(data []v1alpha1.DataFilter, event *v1alpha1.Event) (bool, error) { - if data == nil { +// when operator is v1alpha1.OrLogicalOperator, the filters are OR'ed together: any single passing filter is enough +func filterData(filters []v1alpha1.DataFilter, operator v1alpha1.LogicalOperator, event *v1alpha1.Event) (bool, error) { + if len(filters) == 0 { return true, nil } if event == nil { - return false, fmt.Errorf("nil
Event") + return false, fmt.Errorf(errMsgTemplate, "data", "nil Event") } payload := event.Data if payload == nil { return true, nil } - var js *json.RawMessage - if err := json.Unmarshal(payload, &js); err != nil { - return false, err + if !gjson.Valid(string(payload)) { + return false, fmt.Errorf(errMsgTemplate, "data", "event data not valid JSON") } - var jsData []byte - jsData, err := json.Marshal(js) - if err != nil { - return false, err + + var errMessages []string + if operator == v1alpha1.OrLogicalOperator { + errMessages = make([]string, 0) } -filter: - for _, f := range data { - res := gjson.GetBytes(jsData, f.Path) - if !res.Exists() { - return false, nil +filterData: + for _, f := range filters { + pathResult := gjson.GetBytes(payload, f.Path) + if !pathResult.Exists() { + errMsg := "path '%s' does not exist" + if operator == v1alpha1.OrLogicalOperator { + errMessages = append(errMessages, fmt.Sprintf(errMsg, f.Path)) + continue + } else { + return false, fmt.Errorf(errMsgTemplate, "data", fmt.Sprintf(errMsg, f.Path)) + } + } + + if f.Value == nil || len(f.Value) == 0 { + errMsg := "no values specified" + if operator == v1alpha1.OrLogicalOperator { + errMessages = append(errMessages, errMsg) + continue + } else { + return false, fmt.Errorf(errMsgTemplate, "data", errMsg) + } } if f.Template != "" { - tpl, err := template.New("param").Funcs(sprig.HermeticTxtFuncMap()).Parse(f.Template) - if err != nil { - return false, err + tpl, tplErr := template.New("param").Funcs(sprig.FuncMap()).Parse(f.Template) + if tplErr != nil { + if operator == v1alpha1.OrLogicalOperator { + errMessages = append(errMessages, tplErr.Error()) + continue + } else { + return false, fmt.Errorf(errMsgTemplate, "data", tplErr.Error()) + } } + var buf bytes.Buffer - if err := tpl.Execute(&buf, map[string]interface{}{ - "Input": res.String(), - }); err != nil { - return false, err + execErr := tpl.Execute(&buf, map[string]interface{}{ + "Input": pathResult.String(), + }) + if execErr != nil { + if operator == v1alpha1.OrLogicalOperator { + errMessages = append(errMessages, execErr.Error()) + continue + } else { + return false, fmt.Errorf(errMsgTemplate, "data", execErr.Error()) + } } + out := buf.String() if out == "" || out == "" { - return false, fmt.Errorf("template evaluated to empty string or no value: %s", f.Template) + if operator == v1alpha1.OrLogicalOperator { + errMessages = append(errMessages, fmt.Sprintf("template evaluated to empty string or no value: '%s'", f.Template)) + continue + } else { + return false, fmt.Errorf(errMsgTemplate, "data", + fmt.Sprintf("template '%s' evaluated to empty string or no value", f.Template)) + } } - res = gjson.Parse(strconv.Quote(out)) + + pathResult = gjson.Parse(strconv.Quote(out)) } switch f.Type { @@ -184,82 +280,217 @@ filter: for _, value := range f.Value { val, err := strconv.ParseBool(value) if err != nil { - return false, err + if operator == v1alpha1.OrLogicalOperator { + errMessages = append(errMessages, err.Error()) + continue filterData + } else { + return false, fmt.Errorf(errMsgTemplate, "data", err.Error()) + } } - if val == res.Bool() { - continue filter + + if val == pathResult.Bool() { + if operator == v1alpha1.OrLogicalOperator { + return true, nil + } else { + continue filterData + } } } - return false, nil + + if operator == v1alpha1.OrLogicalOperator { + continue filterData + } else { + return false, nil + } case v1alpha1.JSONTypeNumber: for _, value := range f.Value { filterVal, err := strconv.ParseFloat(value, 64) - eventVal := res.Float() 
+ eventVal := pathResult.Float() if err != nil { - return false, err + if operator == v1alpha1.OrLogicalOperator { + errMessages = append(errMessages, err.Error()) + continue filterData + } else { + return false, fmt.Errorf(errMsgTemplate, "data", err.Error()) + } } + compareResult := false switch f.Comparator { case v1alpha1.GreaterThanOrEqualTo: if eventVal >= filterVal { - continue filter + compareResult = true } case v1alpha1.GreaterThan: if eventVal > filterVal { - continue filter + compareResult = true } case v1alpha1.LessThan: if eventVal < filterVal { - continue filter + compareResult = true } case v1alpha1.LessThanOrEqualTo: if eventVal <= filterVal { - continue filter + compareResult = true } case v1alpha1.NotEqualTo: if eventVal != filterVal { - continue filter + compareResult = true } case v1alpha1.EqualTo, v1alpha1.EmptyComparator: if eventVal == filterVal { - continue filter + compareResult = true } } + + if compareResult { + if operator == v1alpha1.OrLogicalOperator { + return true, nil + } else { + continue filterData + } + } + } + if operator == v1alpha1.OrLogicalOperator { + continue filterData + } else { + return false, nil } - return false, nil case v1alpha1.JSONTypeString: for _, value := range f.Value { exp, err := regexp.Compile(value) if err != nil { - return false, err + if operator == v1alpha1.OrLogicalOperator { + errMessages = append(errMessages, err.Error()) + continue filterData + } else { + return false, fmt.Errorf(errMsgTemplate, "data", err.Error()) + } } - match := exp.Match([]byte(res.String())) + matchResult := false + match := exp.Match([]byte(pathResult.String())) switch f.Comparator { case v1alpha1.EqualTo, v1alpha1.EmptyComparator: if match { - continue filter + matchResult = true } case v1alpha1.NotEqualTo: if !match { - continue filter + matchResult = true + } + } + + if matchResult { + if operator == v1alpha1.OrLogicalOperator { + return true, nil + } else { + continue filterData } } } - return false, nil + + if operator == v1alpha1.OrLogicalOperator { + continue filterData + } else { + return false, nil + } default: - return false, fmt.Errorf("unsupported JSON type %s", f.Type) + errMsg := "unsupported JSON type '%s'" + if operator == v1alpha1.OrLogicalOperator { + errMessages = append(errMessages, fmt.Sprintf(errMsg, f.Type)) + continue filterData + } else { + return false, fmt.Errorf(errMsgTemplate, "data", fmt.Sprintf(errMsg, f.Type)) + } } } - return true, nil + + if operator == v1alpha1.OrLogicalOperator { + if len(errMessages) > 0 { + return false, fmt.Errorf(multiErrMsgTemplate, "data", strings.Join(errMessages, errMsgListSeparator)) + } + return false, nil + } else { + return true, nil + } } -// filterExpr applies expression based filters against event data -func filterExpr(filters []v1alpha1.ExprFilter, event *v1alpha1.Event) (bool, error) { - if filters == nil { +// filterContext checks the expected EventContext against the actual EventContext +// values are only enforced if they are non-zero values +// map types check that the expected map is a subset of the actual map +func filterContext(expected *v1alpha1.EventContext, actual *v1alpha1.EventContext) bool { + if expected == nil { + return true + } + if actual == nil { + return false + } + + res := true + if expected.Type != "" { + res = res && expected.Type == actual.Type + } + if expected.Subject != "" { + res = res && expected.Subject == actual.Subject + } + if expected.Source != "" { + res = res && expected.Source == actual.Source + } + if expected.DataContentType != ""
{ + res = res && expected.DataContentType == actual.DataContentType + } + return res +} + +// filterTime checks the eventTime falls into time range specified by the timeFilter. +// Start is inclusive, and Stop is exclusive. +// +// if Start < Stop: eventTime must be in [Start, Stop) +// +// 0:00 Start Stop 0:00 +// ├───────────●───────────○───────────┤ +// └─── OK ────┘ +// +// if Stop < Start: eventTime must be in [Start, Stop@Next day) +// +// this is equivalent to: eventTime must be in [0:00, Stop) or [Start, 0:00@Next day) +// +// 0:00 Start 0:00 Stop 0:00 +// ├───────────○───────────●───────────┼───────────○───────────●───────────┤ +// └───────── OK ──────────┘ +// +// 0:00 Stop Start 0:00 +// ●───────────○───────────●───────────○ +// └─── OK ────┘ └─── OK ────┘ +func filterTime(timeFilter *v1alpha1.TimeFilter, eventTime time.Time) (bool, error) { + if timeFilter == nil { + return true, nil + } + + // Parse start and stop + startTime, startErr := common.ParseTime(timeFilter.Start, eventTime) + if startErr != nil { + return false, fmt.Errorf(errMsgTemplate, "time", startErr.Error()) + } + stopTime, stopErr := common.ParseTime(timeFilter.Stop, eventTime) + if stopErr != nil { + return false, fmt.Errorf(errMsgTemplate, "time", stopErr.Error()) + } + + // Filtering logic + if startTime.Before(stopTime) { + return (eventTime.After(startTime) || eventTime.Equal(startTime)) && eventTime.Before(stopTime), nil + } else { + return (eventTime.After(startTime) || eventTime.Equal(startTime)) || eventTime.Before(stopTime), nil + } +} + +func filterScript(script string, event *v1alpha1.Event) (bool, error) { + if script == "" { return true, nil } if event == nil { @@ -278,31 +509,17 @@ func filterExpr(filters []v1alpha1.ExprFilter, event *v1alpha1.Event) (bool, err if err != nil { return false, err } - - for _, filter := range filters { - parameters := map[string]interface{}{} - for _, field := range filter.Fields { - result := gjson.GetBytes(jsData, field.Path) - if !result.Exists() { - return false, fmt.Errorf("path %s does not exist", field.Path) - } - parameters[field.Name] = result.Value() - } - if len(parameters) == 0 { - continue - } - expr, err := govaluate.NewEvaluableExpression(filter.Expr) - if err != nil { - return false, err - } - result, err := expr.Evaluate(parameters) - if err != nil { - return false, err - } - if result == true { - return true, nil - } + l := lua.NewState() + defer l.Close() + var payloadJson map[string]interface{} + if err = json.Unmarshal(jsData, &payloadJson); err != nil { + return false, err } - - return false, nil + lEvent := mapToTable(payloadJson) + l.SetGlobal("event", lEvent) + if err = l.DoString(script); err != nil { + return false, err + } + lv := l.Get(-1) + return lv == lua.LTrue, nil } diff --git a/sensors/dependencies/filter_ctx_test.go b/sensors/dependencies/filter_ctx_test.go new file mode 100644 index 0000000000..a1949b3a47 --- /dev/null +++ b/sensors/dependencies/filter_ctx_test.go @@ -0,0 +1,92 @@ +/* +Copyright 2018 BlackRock, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dependencies + +import ( + "testing" + "time" + + "github.com/argoproj/argo-events/common" + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" +) + +func TestFilterContext(t *testing.T) { + tests := []struct { + name string + expectedContext *v1alpha1.EventContext + actualContext *v1alpha1.EventContext + result bool + }{ + { + name: "different event contexts", + expectedContext: &v1alpha1.EventContext{ + Type: "webhook", + }, + actualContext: &v1alpha1.EventContext{ + Type: "calendar", + Source: "calendar-gateway", + ID: "1", + Time: metav1.Time{ + Time: time.Now().UTC(), + }, + DataContentType: common.MediaTypeJSON, + Subject: "example-1", + }, + result: false, + }, + { + name: "contexts are same", + expectedContext: &v1alpha1.EventContext{ + Type: "webhook", + Source: "webhook-gateway", + }, + actualContext: &v1alpha1.EventContext{ + Type: "webhook", + SpecVersion: "0.3", + Source: "webhook-gateway", + ID: "1", + Time: metav1.Time{ + Time: time.Now().UTC(), + }, + DataContentType: common.MediaTypeJSON, + Subject: "example-1", + }, + result: true, + }, + { + name: "actual event context is nil", + expectedContext: &v1alpha1.EventContext{}, + actualContext: nil, + result: false, + }, + { + name: "expected event context is nil", + expectedContext: nil, + result: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + result := filterContext(test.expectedContext, test.actualContext) + assert.Equal(t, test.result, result) + }) + } +} diff --git a/sensors/dependencies/filter_data_test.go b/sensors/dependencies/filter_data_test.go new file mode 100644 index 0000000000..054acd51bd --- /dev/null +++ b/sensors/dependencies/filter_data_test.go @@ -0,0 +1,711 @@ +/* +Copyright 2018 BlackRock, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package dependencies + +import ( + "testing" + + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" +) + +func TestFilterData(t *testing.T) { + type args struct { + data []v1alpha1.DataFilter + operator v1alpha1.LogicalOperator + event *v1alpha1.Event + } + + tests := []struct { + name string + args args + expectedResult bool + expectErr bool + }{ + { + name: "nil event", + args: args{ + data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeString, + Value: []string{"v"}, + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + event: nil, + }, + expectedResult: false, + expectErr: true, + }, + { + name: "unsupported content type", + args: args{ + data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeString, + Value: []string{"v"}, + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + event: &v1alpha1.Event{Data: []byte("a")}, + }, + expectedResult: false, + expectErr: true, + }, + { + name: "empty data", + args: args{ + data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeString, + Value: []string{"v"}, + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + event: &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + DataContentType: "application/json", + }, + }, + }, + expectedResult: true, + expectErr: false, + }, + { + name: "nil filters, JSON data", + args: args{ + data: nil, + operator: v1alpha1.EmptyLogicalOperator, + event: &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + DataContentType: "application/json", + }, + Data: []byte(`{"k": "v"}`), + }, + }, + expectedResult: true, + expectErr: false, + }, + { + name: "invalid filter path, JSON data", + args: args{ + data: []v1alpha1.DataFilter{ + { + Path: "", + Type: v1alpha1.JSONTypeString, + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + event: &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + DataContentType: "application/json", + }, + Data: []byte(`{"k": "v"}`), + }, + }, + expectedResult: false, + expectErr: true, + }, + { + name: "invalid filter type, JSON data", + args: args{ + data: []v1alpha1.DataFilter{ + { + Path: "k", + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + event: &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + DataContentType: "application/json", + }, + Data: []byte(`{"k": "v"}`), + }, + }, + expectedResult: false, + expectErr: true, + }, + { + name: "invalid filter values, JSON data", + args: args{ + data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeString, + Value: []string{}, + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + event: &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + DataContentType: "application/json", + }, + Data: []byte(`{"k": "v"}`), + }, + }, + expectedResult: false, + expectErr: true, + }, + { + name: "string filter, JSON data", + args: args{ + data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeString, + Value: []string{"v"}, + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + event: &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + DataContentType: "application/json", + }, + Data: []byte(`{"k": "v"}`), + }, + }, + expectedResult: true, + expectErr: false, + }, + { + name: "string filter EqualTo, JSON data", + args: args{ + data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeString, + Value: []string{"v"}, + Comparator: "=", + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + event: &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + DataContentType: "application/json", + }, + Data: []byte(`{"k": "v"}`), + }, + }, + 
expectedResult: true, + expectErr: false, + }, + { + name: "string filter NotEqualTo, JSON data", + args: args{ + data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeString, + Value: []string{"b"}, + Comparator: "!=", + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + event: &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + DataContentType: "application/json", + }, + Data: []byte(`{"k": "v"}`), + }, + }, + expectedResult: true, + expectErr: false, + }, + { + name: "number filter (data: string, filter: number), JSON data", + args: args{ + data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeNumber, + Value: []string{"1.0"}, + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + event: &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + DataContentType: "application/json", + }, + Data: []byte(`{"k": "1.0"}`), + }}, + expectedResult: true, + expectErr: false, + }, + { + name: "comparator filter GreaterThan return true (data: string, filter: number), JSON data", + args: args{ + data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeNumber, + Value: []string{"1.0"}, + Comparator: ">", + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + event: &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + DataContentType: "application/json", + }, + Data: []byte(`{"k": "2.0"}`), + }}, + expectedResult: true, + expectErr: false, + }, + { + name: "comparator filter LessThanOrEqualTo return false (data: string, filter: number), JSON data", + args: args{ + data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeNumber, + Value: []string{"1.0"}, + Comparator: "<=", + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + event: &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + DataContentType: "application/json", + }, + Data: []byte(`{"k": "2.0"}`), + }}, + expectedResult: false, + expectErr: false, + }, + { + name: "comparator filter NotEqualTo (data: string, filter: number), JSON data", + args: args{ + data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeNumber, + Value: []string{"1.0"}, + Comparator: "!=", + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + event: &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + DataContentType: "application/json", + }, + Data: []byte(`{"k": "1.0"}`), + }}, + expectedResult: false, + expectErr: false, + }, + { + name: "comparator filter EqualTo (data: string, filter: number), JSON data", + args: args{ + data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeNumber, + Value: []string{"5.0"}, + Comparator: "=", + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + event: &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + DataContentType: "application/json", + }, + Data: []byte(`{"k": "5.0"}`), + }}, + expectedResult: true, + expectErr: false, + }, + { + name: "comparator filter empty (data: string, filter: number), JSON data", + args: args{ + data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeNumber, + Value: []string{"10.0"}, + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + event: &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + DataContentType: "application/json", + }, + Data: []byte(`{"k": "10.0"}`), + }}, + expectedResult: true, + expectErr: false, + }, + { + name: "number filter (data: number, filter: number), JSON data", + args: args{ + data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeNumber, + Value: []string{"1.0"}, + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + event: 
&v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + DataContentType: "application/json", + }, + Data: []byte(`{"k": 1.0}`), + }}, + expectedResult: true, + expectErr: false, + }, + { + name: "comparator filter GreaterThan return true (data: number, filter: number), JSON data", + args: args{ + data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeNumber, + Value: []string{"1.0"}, + Comparator: ">", + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + event: &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + DataContentType: "application/json", + }, + Data: []byte(`{"k": 2.0}`), + }}, + expectedResult: true, + expectErr: false, + }, + { + name: "comparator filter LessThanOrEqualTo return false (data: number, filter: number), JSON data", + args: args{ + data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeNumber, + Value: []string{"1.0"}, + Comparator: "<=", + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + event: &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + DataContentType: "application/json", + }, + Data: []byte(`{"k": 2.0}`), + }}, + expectedResult: false, + expectErr: false, + }, + { + name: "comparator filter NotEqualTo (data: number, filter: number), JSON data", + args: args{ + data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeNumber, + Value: []string{"1.0"}, + Comparator: "!=", + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + event: &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + DataContentType: "application/json", + }, + Data: []byte(`{"k": 1.0}`), + }}, + expectedResult: false, + expectErr: false, + }, + { + name: "comparator filter EqualTo (data: number, filter: number), JSON data", + args: args{ + data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeNumber, + Value: []string{"5.0"}, + Comparator: "=", + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + event: &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + DataContentType: "application/json", + }, + Data: []byte(`{"k": 5.0}`), + }}, + expectedResult: true, + expectErr: false, + }, + { + name: "comparator filter empty (data: number, filter: number), JSON data", + args: args{ + data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeNumber, + Value: []string{"10.0"}, + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + event: &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + DataContentType: "application/json", + }, + Data: []byte(`{"k": 10.0}`), + }}, + expectedResult: true, + expectErr: false, + }, + { + name: "multiple filters return false, nested JSON data, EMPTY operator", + args: args{ + data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeString, + Value: []string{"v"}, + }, + { + Path: "k1.k", + Type: v1alpha1.JSONTypeNumber, + Value: []string{"2.14"}, + }, + { + Path: "k1.k2", + Type: v1alpha1.JSONTypeString, + Value: []string{"hello,world", "hello there"}, + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + event: &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + DataContentType: "application/json", + }, + Data: []byte(`{"k": true, "k1": {"k": 3.14, "k2": "hello, world"}}`), + }}, + expectedResult: false, + expectErr: false, + }, + { + name: "multiple filters return false, nested JSON data, AND operator", + args: args{ + data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeBool, + Value: []string{"true"}, + }, + { + Path: "k1.k", + Type: v1alpha1.JSONTypeNumber, + Value: []string{"3.14"}, + }, + { + Path: "k1.k2", + Type: 
v1alpha1.JSONTypeString, + Value: []string{"hello,world", "hello, world", "hello there"}, + }, + }, + operator: v1alpha1.AndLogicalOperator, + event: &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + DataContentType: "application/json", + }, + Data: []byte(`{"k": true, "k1": {"k": 3.14, "k2": "hello, world"}}`), + }}, + expectedResult: true, + expectErr: false, + }, + { + name: "multiple filters return true, nested JSON data, OR operator", + args: args{ + data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeBool, + Value: []string{"false"}, + }, + { + Path: "k1.k", + Type: v1alpha1.JSONTypeNumber, + Value: []string{"3.14"}, + }, + { + Path: "k1.k2", + Type: v1alpha1.JSONTypeString, + Value: []string{"hello,world", "hello there"}, + }, + }, + operator: v1alpha1.OrLogicalOperator, + event: &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + DataContentType: "application/json", + }, + Data: []byte(`{"k": true, "k1": {"k": 3.14, "k2": "hello, world"}}`), + }}, + expectedResult: true, + expectErr: false, + }, + { + name: "string filter Regex, JSON data", + args: args{ + data: []v1alpha1.DataFilter{ + { + Path: `[k,k1.a.#(k2=="v2").k2]`, + Type: v1alpha1.JSONTypeString, + Value: []string{"\\bv\\b.*\\bv2\\b"}, + Comparator: "=", + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + event: &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + DataContentType: "application/json", + }, + Data: []byte(`{"k": "v", "k1": {"a": [{"k2": "v2"}]}}`), + }, + }, + expectedResult: true, + expectErr: false, + }, + { + name: "string filter Regex2, JSON data", + args: args{ + data: []v1alpha1.DataFilter{ + { + Path: `[k,k1.a.#(k2=="v2").k2,,k1.a.#(k2=="v3").k2]`, + Type: v1alpha1.JSONTypeString, + Value: []string{"(\\bz\\b.*\\bv2\\b)|(\\bv\\b.*(\\bv2\\b.*\\bv3\\b))"}, + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + event: &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + DataContentType: "application/json", + }, + Data: []byte(`{"k": "v", "k1": {"a": [{"k2": "v2"}, {"k2": "v3"}]}}`), + }, + }, + expectedResult: true, + expectErr: false, + }, + { + name: "string filter base64, uppercase template", + args: args{ + data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeString, + Value: []string{"HELLO WORLD"}, + Template: `{{ b64dec .Input | upper }}`, + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + event: &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + DataContentType: "application/json", + }, + Data: []byte(`{"k": "aGVsbG8gd29ybGQ="}`), + }}, + expectedResult: true, + expectErr: false, + }, + { + name: "string filter base64 template", + args: args{ + data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeNumber, + Value: []string{"3.13"}, + Comparator: ">", + Template: `{{ b64dec .Input }}`, + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + event: &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + DataContentType: "application/json", + }, + Data: []byte(`{"k": "My4xNA=="}`), // 3.14 + }}, + expectedResult: true, + expectErr: false, + }, + { + name: "string filter base64 template, comparator not equal", + args: args{ + data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeString, + Value: []string{"hello world"}, + Template: `{{ b64dec .Input }}`, + Comparator: "!=", + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + event: &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + DataContentType: "application/json", + }, + Data: []byte(`{"k": "aGVsbG8gd29ybGQ"}`), + }}, + expectedResult: true, + expectErr: 
false, + }, + { + name: "string filter base64 template, regex", + args: args{ + data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeString, + Value: []string{"world$"}, + Template: `{{ b64dec .Input }}`, + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + event: &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + DataContentType: "application/json", + }, + Data: []byte(`{"k": "aGVsbG8gd29ybGQ="}`), + }}, + expectedResult: true, + expectErr: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got, err := filterData(test.args.data, test.args.operator, test.args.event) + if (err != nil) != test.expectErr { + t.Errorf("filterData() error = %v, expectErr %v", err, test.expectErr) + return + } + if got != test.expectedResult { + t.Errorf("filterData() = %v, expectedResult %v", got, test.expectedResult) + } + }) + } +} diff --git a/sensors/dependencies/filter_event_test.go b/sensors/dependencies/filter_event_test.go new file mode 100644 index 0000000000..77ebb7eb88 --- /dev/null +++ b/sensors/dependencies/filter_event_test.go @@ -0,0 +1,672 @@ +/* +Copyright 2018 BlackRock, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package dependencies + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" +) + +func TestFilterEvent_All(t *testing.T) { + t.Run("test all valid", func(t *testing.T) { + filter := &v1alpha1.EventDependencyFilter{ + Time: &v1alpha1.TimeFilter{ + Start: "09:09:09", + Stop: "19:19:19", + }, + Context: &v1alpha1.EventContext{ + Type: "webhook", + Source: "webhook-gateway", + }, + Data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeString, + Value: []string{"v"}, + }, + }, + Exprs: []v1alpha1.ExprFilter{ + { + Expr: `k != "x"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "k", + Name: "k", + }, + }, + }, + }, + } + filtersLogicalOperator := v1alpha1.AndLogicalOperator + + now := time.Now().UTC() + event := &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + Type: "webhook", + SpecVersion: "0.3", + Source: "webhook-gateway", + ID: "1", + Time: metav1.Time{ + Time: time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, time.UTC), + }, + DataContentType: "application/json", + Subject: "example-1", + }, + Data: []byte(`{"k": "v"}`), + } + + valid, err := filterEvent(filter, filtersLogicalOperator, event) + + assert.NoError(t, err) + assert.True(t, valid) + }) + + t.Run("test time not valid", func(t *testing.T) { + filter := v1alpha1.EventDependencyFilter{ + Time: &v1alpha1.TimeFilter{ + Start: "09:09:09", + Stop: "10:10:10", + }, + Context: &v1alpha1.EventContext{ + Type: "webhook", + Source: "webhook-gateway", + }, + Data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeString, + Value: []string{"v"}, + }, + }, + Exprs: []v1alpha1.ExprFilter{ + { + Expr: `k != "x"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "k", + Name: "k", + }, + }, + }, + }, + } + 
filtersLogicalOperator := v1alpha1.AndLogicalOperator + + now := time.Now().UTC() + event := &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + Type: "webhook", + SpecVersion: "0.3", + Source: "webhook-gateway", + ID: "1", + Time: metav1.Time{ + Time: time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, time.UTC), + }, + DataContentType: "application/json", + Subject: "example-1", + }, + Data: []byte(`{"k": "v"}`), + } + + valid, err := filterEvent(&filter, filtersLogicalOperator, event) + + assert.NoError(t, err) + assert.False(t, valid) + }) + + t.Run("test ctx not valid", func(t *testing.T) { + filter := v1alpha1.EventDependencyFilter{ + Time: &v1alpha1.TimeFilter{ + Start: "09:09:09", + Stop: "19:19:19", + }, + Context: &v1alpha1.EventContext{ + Type: "webhook", + Source: "webhook-gateway", + }, + Data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeString, + Value: []string{"v"}, + }, + }, + Exprs: []v1alpha1.ExprFilter{ + { + Expr: `k != "x"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "k", + Name: "k", + }, + }, + }, + }, + } + filtersLogicalOperator := v1alpha1.AndLogicalOperator + + now := time.Now().UTC() + event := &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + Type: "webhook", + SpecVersion: "0.3", + Source: "webhook-fake", + ID: "1", + Time: metav1.Time{ + Time: time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, time.UTC), + }, + DataContentType: "application/json", + Subject: "example-1", + }, + Data: []byte(`{"k": "v"}`), + } + + valid, err := filterEvent(&filter, filtersLogicalOperator, event) + + assert.NoError(t, err) + assert.False(t, valid) + }) + + t.Run("test data not valid", func(t *testing.T) { + filter := v1alpha1.EventDependencyFilter{ + Time: &v1alpha1.TimeFilter{ + Start: "09:09:09", + Stop: "19:19:19", + }, + Context: &v1alpha1.EventContext{ + Type: "webhook", + Source: "webhook-gateway", + }, + Data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeString, + Value: []string{"v"}, + }, + }, + Exprs: []v1alpha1.ExprFilter{ + { + Expr: `k == "x"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "k", + Name: "k", + }, + }, + }, + }, + } + filtersLogicalOperator := v1alpha1.AndLogicalOperator + + now := time.Now().UTC() + event := &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + Type: "webhook", + SpecVersion: "0.3", + Source: "webhook-gateway", + ID: "1", + Time: metav1.Time{ + Time: time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, time.UTC), + }, + DataContentType: "application/json", + Subject: "example-1", + }, + Data: []byte(`{"k": "x"}`), + } + + valid, err := filterEvent(&filter, filtersLogicalOperator, event) + + assert.NoError(t, err) + assert.False(t, valid) + }) + + t.Run("test expr not valid", func(t *testing.T) { + filter := v1alpha1.EventDependencyFilter{ + Time: &v1alpha1.TimeFilter{ + Start: "09:09:09", + Stop: "19:19:19", + }, + Context: &v1alpha1.EventContext{ + Type: "webhook", + Source: "webhook-gateway", + }, + Data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeString, + Value: []string{"v"}, + }, + }, + Exprs: []v1alpha1.ExprFilter{ + { + Expr: `k != "v"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "k", + Name: "k", + }, + }, + }, + }, + } + filtersLogicalOperator := v1alpha1.AndLogicalOperator + + now := time.Now().UTC() + event := &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + Type: "webhook", + SpecVersion: "0.3", + Source: "webhook-gateway", + ID: "1", + Time: metav1.Time{ + Time: time.Date(now.Year(), now.Month(), now.Day(), 
16, 36, 34, 0, time.UTC), + }, + DataContentType: "application/json", + Subject: "example-1", + }, + Data: []byte(`{"k": "v"}`), + } + + valid, err := filterEvent(&filter, filtersLogicalOperator, event) + + assert.NoError(t, err) + assert.False(t, valid) + }) +} + +func TestFilterEvent_Expr(t *testing.T) { + t.Run("test valid", func(t *testing.T) { + filter := v1alpha1.EventDependencyFilter{ + Exprs: []v1alpha1.ExprFilter{ + { + Expr: `k != "x"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "k", + Name: "k", + }, + }, + }, + }, + } + filtersLogicalOperator := v1alpha1.AndLogicalOperator + + now := time.Now().UTC() + event := &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + Type: "webhook", + SpecVersion: "0.3", + Source: "webhook-gateway", + ID: "1", + Time: metav1.Time{ + Time: time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, time.UTC), + }, + DataContentType: "application/json", + Subject: "example-1", + }, + Data: []byte(`{"k": "v"}`), + } + + valid, err := filterEvent(&filter, filtersLogicalOperator, event) + + assert.NoError(t, err) + assert.True(t, valid) + }) + + t.Run("test not valid", func(t *testing.T) { + filter := v1alpha1.EventDependencyFilter{ + Exprs: []v1alpha1.ExprFilter{ + { + Expr: `k != "v"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "k", + Name: "k", + }, + }, + }, + }, + } + filtersLogicalOperator := v1alpha1.AndLogicalOperator + + now := time.Now().UTC() + event := &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + Type: "webhook", + SpecVersion: "0.3", + Source: "webhook-gateway", + ID: "1", + Time: metav1.Time{ + Time: time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, time.UTC), + }, + DataContentType: "application/json", + Subject: "example-1", + }, + Data: []byte(`{"k": "v"}`), + } + + valid, err := filterEvent(&filter, filtersLogicalOperator, event) + + assert.NoError(t, err) + assert.False(t, valid) + }) + + t.Run("test error", func(t *testing.T) { + filter := v1alpha1.EventDependencyFilter{ + Exprs: []v1alpha1.ExprFilter{ + { + Expr: `k !== "x"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "k", + Name: "k", + }, + }, + }, + }, + } + filtersLogicalOperator := v1alpha1.AndLogicalOperator + + now := time.Now().UTC() + event := &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + Type: "webhook", + SpecVersion: "0.3", + Source: "webhook-gateway", + ID: "1", + Time: metav1.Time{ + Time: time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, time.UTC), + }, + DataContentType: "application/json", + Subject: "example-1", + }, + Data: []byte(`{"k": "v"}`), + } + + valid, err := filterEvent(&filter, filtersLogicalOperator, event) + + assert.Error(t, err) + assert.False(t, valid) + }) +} + +func TestFilterEvent_Data(t *testing.T) { + t.Run("test valid", func(t *testing.T) { + filter := v1alpha1.EventDependencyFilter{ + Data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeString, + Value: []string{"v"}, + }, + }, + } + filtersLogicalOperator := v1alpha1.AndLogicalOperator + + now := time.Now().UTC() + event := &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + Type: "webhook", + SpecVersion: "0.3", + Source: "webhook-gateway", + ID: "1", + Time: metav1.Time{ + Time: time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, time.UTC), + }, + DataContentType: "application/json", + Subject: "example-1", + }, + Data: []byte(`{"k": "v"}`), + } + + valid, err := filterEvent(&filter, filtersLogicalOperator, event) + + assert.NoError(t, err) + assert.True(t, valid) + }) + + t.Run("test not valid", func(t 
*testing.T) { + filter := v1alpha1.EventDependencyFilter{ + Data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeString, + Value: []string{"x"}, + }, + }, + } + filtersLogicalOperator := v1alpha1.AndLogicalOperator + + now := time.Now().UTC() + event := &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + Type: "webhook", + SpecVersion: "0.3", + Source: "webhook-gateway", + ID: "1", + Time: metav1.Time{ + Time: time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, time.UTC), + }, + DataContentType: "application/json", + Subject: "example-1", + }, + Data: []byte(`{"k": "v"}`), + } + + valid, err := filterEvent(&filter, filtersLogicalOperator, event) + + assert.NoError(t, err) + assert.False(t, valid) + }) + + t.Run("test error", func(t *testing.T) { + filter := v1alpha1.EventDependencyFilter{ + Data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeString, + }, + }, + } + filtersLogicalOperator := v1alpha1.AndLogicalOperator + + now := time.Now().UTC() + event := &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + Type: "webhook", + SpecVersion: "0.3", + Source: "webhook-gateway", + ID: "1", + Time: metav1.Time{ + Time: time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, time.UTC), + }, + DataContentType: "application/json", + Subject: "example-1", + }, + Data: []byte(`{"k": "v"}`), + } + + valid, err := filterEvent(&filter, filtersLogicalOperator, event) + + assert.Error(t, err) + assert.False(t, valid) + }) +} + +func TestFilterEvent_Context(t *testing.T) { + t.Run("test valid", func(t *testing.T) { + filter := v1alpha1.EventDependencyFilter{ + Context: &v1alpha1.EventContext{ + Type: "webhook", + Source: "webhook-gateway", + }, + } + filtersLogicalOperator := v1alpha1.AndLogicalOperator + + now := time.Now().UTC() + event := &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + Type: "webhook", + SpecVersion: "0.3", + Source: "webhook-gateway", + ID: "1", + Time: metav1.Time{ + Time: time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, time.UTC), + }, + DataContentType: "application/json", + Subject: "example-1", + }, + Data: []byte(`{"k": "v"}`), + } + + valid, err := filterEvent(&filter, filtersLogicalOperator, event) + + assert.NoError(t, err) + assert.True(t, valid) + }) + + t.Run("test not valid", func(t *testing.T) { + filter := v1alpha1.EventDependencyFilter{ + Context: &v1alpha1.EventContext{ + Type: "webhook", + Source: "webhook-fake", + }, + } + filtersLogicalOperator := v1alpha1.AndLogicalOperator + + now := time.Now().UTC() + event := &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + Type: "webhook", + SpecVersion: "0.3", + Source: "webhook-gateway", + ID: "1", + Time: metav1.Time{ + Time: time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, time.UTC), + }, + DataContentType: "application/json", + Subject: "example-1", + }, + Data: []byte(`{"k": "v"}`), + } + + valid, err := filterEvent(&filter, filtersLogicalOperator, event) + + assert.NoError(t, err) + assert.False(t, valid) + }) +} + +func TestFilterEvent_Time(t *testing.T) { + t.Run("test valid", func(t *testing.T) { + filter := v1alpha1.EventDependencyFilter{ + Time: &v1alpha1.TimeFilter{ + Start: "09:09:09", + Stop: "19:19:19", + }, + } + filtersLogicalOperator := v1alpha1.AndLogicalOperator + + now := time.Now().UTC() + event := &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + Type: "webhook", + SpecVersion: "0.3", + Source: "webhook-gateway", + ID: "1", + Time: metav1.Time{ + Time: time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, 
time.UTC), + }, + DataContentType: "application/json", + Subject: "example-1", + }, + Data: []byte(`{"k": "v"}`), + } + + valid, err := filterEvent(&filter, filtersLogicalOperator, event) + + assert.NoError(t, err) + assert.True(t, valid) + }) + + t.Run("test not valid", func(t *testing.T) { + filter := v1alpha1.EventDependencyFilter{ + Time: &v1alpha1.TimeFilter{ + Start: "09:09:09", + Stop: "10:10:10", + }, + } + filtersLogicalOperator := v1alpha1.AndLogicalOperator + + now := time.Now().UTC() + event := &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + Type: "webhook", + SpecVersion: "0.3", + Source: "webhook-gateway", + ID: "1", + Time: metav1.Time{ + Time: time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, time.UTC), + }, + DataContentType: "application/json", + Subject: "example-1", + }, + Data: []byte(`{"k": "v"}`), + } + + valid, err := filterEvent(&filter, filtersLogicalOperator, event) + + assert.NoError(t, err) + assert.False(t, valid) + }) + + t.Run("test error", func(t *testing.T) { + filter := v1alpha1.EventDependencyFilter{ + Time: &v1alpha1.TimeFilter{ + Start: "09:09:09", + Stop: "10:10", + }, + } + filtersLogicalOperator := v1alpha1.AndLogicalOperator + + now := time.Now().UTC() + event := &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + Type: "webhook", + SpecVersion: "0.3", + Source: "webhook-gateway", + ID: "1", + Time: metav1.Time{ + Time: time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, time.UTC), + }, + DataContentType: "application/json", + Subject: "example-1", + }, + Data: []byte(`{"k": "v"}`), + } + + valid, err := filterEvent(&filter, filtersLogicalOperator, event) + + assert.Error(t, err) + assert.False(t, valid) + }) +} diff --git a/sensors/dependencies/filter_expr_test.go b/sensors/dependencies/filter_expr_test.go new file mode 100644 index 0000000000..87a99cd585 --- /dev/null +++ b/sensors/dependencies/filter_expr_test.go @@ -0,0 +1,611 @@ +/* +Copyright 2018 BlackRock, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package dependencies + +import ( + "testing" + + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" + "github.com/stretchr/testify/assert" +) + +func TestFilterExpr(t *testing.T) { + tests := []struct { + name string + event *v1alpha1.Event + filters []v1alpha1.ExprFilter + operator v1alpha1.LogicalOperator + expectedResult bool + expectedErrMsg string + }{ + { + name: "nil event", + event: nil, + filters: []v1alpha1.ExprFilter{ + { + Expr: `a == "b"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "a", + Name: "a", + }, + }, + }, + }, + expectedResult: false, + expectedErrMsg: "expr filter error (nil event)", + }, + { + name: "unsupported content type", + event: &v1alpha1.Event{Data: []byte("a")}, + filters: []v1alpha1.ExprFilter{ + { + Expr: `a == "b"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "a", + Name: "a", + }, + }, + }, + }, + expectedResult: false, + expectedErrMsg: "expr filter error (event data not valid JSON)", + }, + { + name: "empty data", + event: &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + DataContentType: "application/json", + }, + }, + filters: []v1alpha1.ExprFilter{ + { + Expr: `a == "b"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "a", + Name: "a", + }, + }, + }, + }, + expectedResult: true, + expectedErrMsg: "", + }, + { + name: "nil filters", + event: &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + DataContentType: "application/json", + }, + }, + filters: nil, + expectedResult: true, + expectedErrMsg: "", + }, + { + name: "simple string equal", + event: &v1alpha1.Event{ + Data: []byte(`{"a": "b"}`), + }, + filters: []v1alpha1.ExprFilter{ + { + Expr: `a == "b"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "a", + Name: "a", + }, + }, + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + expectedResult: true, + expectedErrMsg: "", + }, + { + name: "simple string different than", + event: &v1alpha1.Event{ + Data: []byte(`{"a": "c"}`), + }, + filters: []v1alpha1.ExprFilter{ + { + Expr: `a != "b"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "a", + Name: "a", + }, + }, + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + expectedResult: true, + expectedErrMsg: "", + }, + { + name: "nested string equal", + event: &v1alpha1.Event{ + Data: []byte(`{"a": {"b": "c"}}`), + }, + filters: []v1alpha1.ExprFilter{ + { + Expr: `b == "b"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "a.b", + Name: "b", + }, + }, + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + expectedResult: false, + expectedErrMsg: "", + }, + { + name: "number equal", + event: &v1alpha1.Event{ + Data: []byte(`{"a": {"b": 2}}`), + }, + filters: []v1alpha1.ExprFilter{ + { + Expr: `b == 2`, + Fields: []v1alpha1.PayloadField{ + { + Path: "a.b", + Name: "b", + }, + }, + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + expectedResult: true, + expectedErrMsg: "", + }, + { + name: "number less than", + event: &v1alpha1.Event{ + Data: []byte(`{"a": {"b": 2}}`), + }, + filters: []v1alpha1.ExprFilter{ + { + Expr: `b < 1`, + Fields: []v1alpha1.PayloadField{ + { + Path: "a.b", + Name: "b", + }, + }, + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + expectedResult: false, + expectedErrMsg: "", + }, + { + name: "string contains", + event: &v1alpha1.Event{ + Data: []byte(`{"a": {"b": "start long string"}}`), + }, + filters: []v1alpha1.ExprFilter{ + { + Expr: `b =~ "start"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "a.b", + Name: "b", + }, + }, + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + expectedResult: true, + expectedErrMsg: "", + }, + { + name: 
"string does not contain", + event: &v1alpha1.Event{ + Data: []byte(`{"a": {"b": "long string"}}`), + }, + filters: []v1alpha1.ExprFilter{ + { + Expr: `b !~ "start"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "a.b", + Name: "b", + }, + }, + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + expectedResult: true, + expectedErrMsg: "", + }, + { + name: "multiple filters, EMPTY operator", + event: &v1alpha1.Event{ + Data: []byte(`{"a": {"b": "x", "d": "y"}}`), + }, + filters: []v1alpha1.ExprFilter{ + { + Expr: `b == "x"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "a.b", + Name: "b", + }, + }, + }, + { + Expr: `d == "y"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "a.d", + Name: "d", + }, + }, + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + expectedResult: true, + expectedErrMsg: "", + }, + { + name: "multiple filters, AND operator", + event: &v1alpha1.Event{ + Data: []byte(`{"a": {"b": "x", "d": "y"}}`), + }, + filters: []v1alpha1.ExprFilter{ + { + Expr: `b == "x"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "a.b", + Name: "b", + }, + }, + }, + { + Expr: `d == "d"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "a.d", + Name: "d", + }, + }, + }, + }, + operator: v1alpha1.AndLogicalOperator, + expectedResult: false, + expectedErrMsg: "", + }, + { + name: "multiple filters, OR operator", + event: &v1alpha1.Event{ + Data: []byte(`{"a": {"b": "x", "d": "y"}}`), + }, + filters: []v1alpha1.ExprFilter{ + { + Expr: `b == "x"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "a.b", + Name: "b", + }, + }, + }, + { + Expr: `d == "d"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "a.d", + Name: "d", + }, + }, + }, + }, + operator: v1alpha1.OrLogicalOperator, + expectedResult: true, + expectedErrMsg: "", + }, + { + name: "multiple filters, OR operator, one field not existing", + event: &v1alpha1.Event{ + Data: []byte(`{"a": {"b": "x", "d": "y"}}`), + }, + filters: []v1alpha1.ExprFilter{ + { + Expr: `b == "b"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "a.b", + Name: "b", + }, + }, + }, + { + Expr: `c == "c"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "a.c", + Name: "c", + }, + }, + }, + }, + operator: v1alpha1.OrLogicalOperator, + expectedResult: false, + expectedErrMsg: "expr filter errors [path 'a.c' does not exist]", + }, + { + name: "multiple filters, OR operator, one field not existing but not reached because first filter is false", + event: &v1alpha1.Event{ + Data: []byte(`{"a": {"b": "x", "d": {"e": true}}}`), + }, + filters: []v1alpha1.ExprFilter{ + { + Expr: `b == "x"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "a.b", + Name: "b", + }, + }, + }, + { + Expr: `c == "c"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "a.c", + Name: "c", + }, + }, + }, + { + Expr: `e == true`, + Fields: []v1alpha1.PayloadField{ + { + Path: "a.d.e", + Name: "e", + }, + }, + }, + }, + operator: v1alpha1.OrLogicalOperator, + expectedResult: true, + expectedErrMsg: "", + }, + { + name: "multiple filters, EMPTY operator, one field not existing but not reached because first filter is false", + event: &v1alpha1.Event{ + Data: []byte(`{"a": {"b": "x", "d": "y"}}`), + }, + filters: []v1alpha1.ExprFilter{ + { + Expr: `b == "b"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "a.b", + Name: "b", + }, + }, + }, + { + Expr: `c == "c"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "a.c", + Name: "c", + }, + }, + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + expectedResult: false, + expectedErrMsg: "", + }, + { + name: "multiple filters, EMPTY operator, one field not existing", 
+ event: &v1alpha1.Event{ + Data: []byte(`{"a": {"b": "x", "d": "y"}}`), + }, + filters: []v1alpha1.ExprFilter{ + { + Expr: `b == "x"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "a.b", + Name: "b", + }, + }, + }, + { + Expr: `c == "c"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "a.c", + Name: "c", + }, + }, + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + expectedResult: false, + expectedErrMsg: "expr filter error (path 'a.c' does not exist)", + }, + { + name: "multiple filters, AND operator, one field not existing", + event: &v1alpha1.Event{ + Data: []byte(`{"a": {"b": "x", "d": {"e": true}}}`), + }, + filters: []v1alpha1.ExprFilter{ + { + Expr: `b == "x"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "a.b", + Name: "b", + }, + }, + }, + { + Expr: `c == "c"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "a.c", + Name: "c", + }, + }, + }, + { + Expr: `e == true`, + Fields: []v1alpha1.PayloadField{ + { + Path: "a.d.e", + Name: "e", + }, + }, + }, + }, + operator: v1alpha1.AndLogicalOperator, + expectedResult: false, + expectedErrMsg: "expr filter error (path 'a.c' does not exist)", + }, + { + name: "AND comparator inside expr (different than expr logical operator), one field not existing", + event: &v1alpha1.Event{ + Data: []byte(`{"a": {"b": "x", "c": {"d": true}}}`), + }, + filters: []v1alpha1.ExprFilter{ + { + Expr: `b != "b" && d == true`, + Fields: []v1alpha1.PayloadField{ + { + Path: "a.b", + Name: "b", + }, + { + Path: "a.d", + Name: "d", + }, + }, + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + expectedResult: false, + expectedErrMsg: "expr filter error (path 'a.d' does not exist)", + }, + { + name: "AND comparator inside expr (different than expr logical operator)", + event: &v1alpha1.Event{ + Data: []byte(`{"a": {"b": "x", "c": {"d": true}}}`), + }, + filters: []v1alpha1.ExprFilter{ + { + Expr: `b != "b" && d == true`, + Fields: []v1alpha1.PayloadField{ + { + Path: "a.b", + Name: "b", + }, + { + Path: "a.c.d", + Name: "d", + }, + }, + }, + }, + operator: v1alpha1.OrLogicalOperator, + expectedResult: true, + expectedErrMsg: "", + }, + { + name: "OR comparator inside expr (different than expr logical operator)", + event: &v1alpha1.Event{ + Data: []byte(`{"a": {"b": "b", "c": {"d": false}}}`), + }, + filters: []v1alpha1.ExprFilter{ + { + Expr: `b == "b" || d == true`, + Fields: []v1alpha1.PayloadField{ + { + Path: "a.b", + Name: "b", + }, + { + Path: "a.c.d", + Name: "d", + }, + }, + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + expectedResult: true, + expectedErrMsg: "", + }, + { + name: "multiple comparators inside expr (different than expr logical operator)", + event: &v1alpha1.Event{ + Data: []byte(`{"a": {"b": "b", "c": {"d": false}, "e": 2}}`), + }, + filters: []v1alpha1.ExprFilter{ + { + Expr: `b == "b" || (d == true && e == 2)`, + Fields: []v1alpha1.PayloadField{ + { + Path: "a.b", + Name: "b", + }, + { + Path: "a.c.d", + Name: "d", + }, + }, + }, + }, + operator: v1alpha1.EmptyLogicalOperator, + expectedResult: true, + expectedErrMsg: "", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + actualResult, actualErr := filterExpr(test.filters, test.operator, test.event) + + if (test.expectedErrMsg != "" && actualErr == nil) || + (test.expectedErrMsg == "" && actualErr != nil) { + t.Logf("'%s' test failed: expected error '%s' got '%v'", + test.name, test.expectedErrMsg, actualErr) + } + if test.expectedErrMsg != "" { + assert.EqualError(t, actualErr, test.expectedErrMsg) + } else { + assert.NoError(t,
actualErr) + } + + if test.expectedResult != actualResult { + t.Logf("'%s' test failed: expected result '%t' got '%t'", + test.name, test.expectedResult, actualResult) + } + assert.Equal(t, test.expectedResult, actualResult) + }) + } +} diff --git a/sensors/dependencies/filter_script_test.go b/sensors/dependencies/filter_script_test.go new file mode 100644 index 0000000000..9d97dfd434 --- /dev/null +++ b/sensors/dependencies/filter_script_test.go @@ -0,0 +1,67 @@ +package dependencies + +import ( + "testing" + + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" + "github.com/stretchr/testify/assert" +) + +func TestScriptFilter(t *testing.T) { + tests := []struct { + script string + event *v1alpha1.Event + result bool + hasError bool + }{ + { + script: ` +if event.a == "hello" then return true else return false end +`, + event: &v1alpha1.Event{ + Data: []byte(`{"a":"hello"}`), + }, + result: true, + hasError: false, + }, + { + script: ` +if event.a == "hello" and event.b == "world" then return false else return true end +`, + event: &v1alpha1.Event{ + Data: []byte(`{"a":"hello","b":"world"}`), + }, + result: false, + hasError: false, + }, + { + script: ` +if event.a == "hello" return false else return true end +`, + event: &v1alpha1.Event{ + Data: []byte(`{"a":"hello"}`), + }, + result: false, + hasError: true, + }, + { + script: ` +if a.a == "hello" then return true else return false end +`, + event: &v1alpha1.Event{ + Data: []byte(`{"a":"hello"}`), + }, + result: false, + hasError: true, + }, + } + for _, tt := range tests { + result, err := filterScript(tt.script, tt.event) + if tt.hasError { + assert.NotNil(t, err) + } else { + assert.Nil(t, err) + assert.Equal(t, tt.result, result) + } + } +} diff --git a/sensors/dependencies/filter_test.go b/sensors/dependencies/filter_test.go index f99cff1a88..f6f11dfee6 100644 --- a/sensors/dependencies/filter_test.go +++ b/sensors/dependencies/filter_test.go @@ -17,651 +17,972 @@ limitations under the License.
package dependencies import ( + "encoding/json" "testing" "time" "github.com/stretchr/testify/assert" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/argoproj/argo-events/common" "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" ) -func TestFilterContext(t *testing.T) { - tests := []struct { - name string - expectedContext *v1alpha1.EventContext - actualContext *v1alpha1.EventContext - result bool - }{ - { - name: "different event contexts", - expectedContext: &v1alpha1.EventContext{ - Type: "webhook", - }, - actualContext: &v1alpha1.EventContext{ - Type: "calendar", - Source: "calendar-gateway", - ID: "1", +func TestFilter(t *testing.T) { + t.Run("test empty", func(t *testing.T) { + filter := &v1alpha1.EventDependencyFilter{} + filtersLogicalOperator := v1alpha1.AndLogicalOperator + now := time.Now().UTC() + event := &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + Type: "webhook", + SpecVersion: "0.3", + Source: "webhook-gateway", + ID: "1", + Time: metav1.Time{ + Time: time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, time.UTC), + }, + DataContentType: "application/json", + Subject: "example-1", + }, + Data: []byte(`{"k": "v"}`), + } + + pass, err := Filter(event, filter, filtersLogicalOperator) + + assert.NoError(t, err) + assert.True(t, pass) + }) + + t.Run("test event passing", func(t *testing.T) { + filter := &v1alpha1.EventDependencyFilter{ + Data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeString, + Value: []string{"v"}, + }, + }, + } + filtersLogicalOperator := v1alpha1.AndLogicalOperator + + now := time.Now().UTC() + event := &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + Type: "webhook", + SpecVersion: "0.3", + Source: "webhook-gateway", + ID: "1", + Time: metav1.Time{ + Time: time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, time.UTC), + }, + DataContentType: "application/json", + Subject: "example-1", + }, + Data: []byte(`{"k": "v"}`), + } + + pass, err := filterEvent(filter, filtersLogicalOperator, event) + + assert.NoError(t, err) + assert.True(t, pass) + }) + + t.Run("test event not passing", func(t *testing.T) { + filter := &v1alpha1.EventDependencyFilter{ + Data: []v1alpha1.DataFilter{ + { + Path: "z", + Type: v1alpha1.JSONTypeString, + Value: []string{"v"}, + }, + }, + } + filtersLogicalOperator := v1alpha1.AndLogicalOperator + now := time.Now().UTC() + event := &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + Type: "webhook", + SpecVersion: "0.3", + Source: "webhook-gateway", + ID: "1", + Time: metav1.Time{ + Time: time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, time.UTC), + }, + DataContentType: "application/json", + Subject: "example-1", + }, + Data: []byte(`{"k": "v"}`), + } + + pass, err := filterEvent(filter, filtersLogicalOperator, event) + + assert.Error(t, err) + assert.False(t, pass) + }) + + t.Run("test error", func(t *testing.T) { + filter := &v1alpha1.EventDependencyFilter{ + Time: &v1alpha1.TimeFilter{ + Start: "09:09:0", + Stop: "19:19:19", + }, + } + filtersLogicalOperator := v1alpha1.AndLogicalOperator + now := time.Now().UTC() + event := &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + Type: "webhook", + SpecVersion: "0.3", + Source: "webhook-gateway", + ID: "1", Time: metav1.Time{ - Time: time.Now().UTC(), + Time: time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, time.UTC), }, - DataContentType: common.MediaTypeJSON, + DataContentType: "application/json", Subject: "example-1", }, - result: false, - }, - { - name: "contexts are same", - 
expectedContext: &v1alpha1.EventContext{ + Data: []byte(`{"k": "v"}`), + } + + pass, err := filterEvent(filter, filtersLogicalOperator, event) + + assert.Error(t, err) + assert.False(t, pass) + }) + + t.Run("test 'empty' filtersLogicalOperator", func(t *testing.T) { + // ctx filter: true + // data filter: false + filter := &v1alpha1.EventDependencyFilter{ + Context: &v1alpha1.EventContext{ Type: "webhook", Source: "webhook-gateway", }, - actualContext: &v1alpha1.EventContext{ + Data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeString, + Value: []string{"z"}, + }, + }, + } + filtersLogicalOperator := v1alpha1.EmptyLogicalOperator + now := time.Now().UTC() + event := &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ Type: "webhook", SpecVersion: "0.3", Source: "webhook-gateway", ID: "1", Time: metav1.Time{ - Time: time.Now().UTC(), + Time: time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, time.UTC), }, - DataContentType: common.MediaTypeJSON, + DataContentType: "application/json", Subject: "example-1", }, - result: true, - }, - { - name: "actual event context is nil", - expectedContext: &v1alpha1.EventContext{}, - actualContext: nil, - result: false, - }, - { - name: "expected event context is nil", - expectedContext: nil, - result: true, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - result := filterContext(test.expectedContext, test.actualContext) - assert.Equal(t, test.result, result) - }) - } -} + Data: []byte(`{"k": "v"}`), + } -func TestFilterData(t *testing.T) { - type args struct { - data []v1alpha1.DataFilter - event *v1alpha1.Event - } - tests := []struct { - name string - args args - want bool - wantErr bool - }{ - { - name: "nil event", - args: args{data: nil, event: nil}, - want: true, - wantErr: false, - }, - { - name: "unsupported content type", - args: args{data: nil, event: &v1alpha1.Event{Data: []byte("a")}}, - want: true, - wantErr: false, - }, - { - name: "empty data", - args: args{data: nil, event: &v1alpha1.Event{ - Context: &v1alpha1.EventContext{ - DataContentType: ("application/json"), - }, - }}, - want: true, - wantErr: false, - }, - { - name: "nil filters, JSON data", - args: args{data: nil, event: &v1alpha1.Event{ - Context: &v1alpha1.EventContext{ - DataContentType: ("application/json"), - }, - Data: []byte("{\"k\": \"v\"}"), - }}, - want: true, - wantErr: false, - }, - { - name: "string filter, JSON data", - args: args{ - data: []v1alpha1.DataFilter{ - { - Path: "k", - Type: v1alpha1.JSONTypeString, - Value: []string{"v"}, - }, + pass, err := filterEvent(filter, filtersLogicalOperator, event) + + assert.NoError(t, err) + assert.False(t, pass) + }) + + t.Run("test 'empty' filtersLogicalOperator with error", func(t *testing.T) { + // ctx filter: true + // data filter: error (false) + filter := &v1alpha1.EventDependencyFilter{ + Context: &v1alpha1.EventContext{ + Type: "webhook", + Source: "webhook-gateway", + }, + Data: []v1alpha1.DataFilter{ + { + Path: "z", + Type: v1alpha1.JSONTypeString, + Value: []string{"v"}, }, - event: &v1alpha1.Event{ - Context: &v1alpha1.EventContext{ - DataContentType: ("application/json"), - }, - Data: []byte("{\"k\": \"v\"}"), - }, - }, - want: true, - wantErr: false, - }, - { - name: "string filter EqualTo, JSON data", - args: args{ - data: []v1alpha1.DataFilter{ - { - Path: "k", - Type: v1alpha1.JSONTypeString, - Value: []string{"v"}, - Comparator: "=", - }, + }, + } + filtersLogicalOperator := v1alpha1.EmptyLogicalOperator + now := time.Now().UTC() + event := 
&v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + Type: "webhook", + SpecVersion: "0.3", + Source: "webhook-gateway", + ID: "1", + Time: metav1.Time{ + Time: time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, time.UTC), }, - event: &v1alpha1.Event{ - Context: &v1alpha1.EventContext{ - DataContentType: ("application/json"), - }, - Data: []byte("{\"k\": \"v\"}"), - }, - }, - want: true, - wantErr: false, - }, - { - name: "string filter NotEqualTo, JSON data", - args: args{ - data: []v1alpha1.DataFilter{ - { - Path: "k", - Type: v1alpha1.JSONTypeString, - Value: []string{"b"}, - Comparator: "!=", - }, + DataContentType: "application/json", + Subject: "example-1", + }, + Data: []byte(`{"k": "v"}`), + } + + pass, err := filterEvent(filter, filtersLogicalOperator, event) + + assert.Error(t, err) + assert.False(t, pass) + }) + + t.Run("test 'and' filtersLogicalOperator", func(t *testing.T) { + // ctx filter: false + // data filter: true + filter := &v1alpha1.EventDependencyFilter{ + Context: &v1alpha1.EventContext{ + Type: "webhook", + Source: "webhook-fake", + }, + Data: []v1alpha1.DataFilter{ + { + Path: "k", + Type: v1alpha1.JSONTypeString, + Value: []string{"v"}, }, - event: &v1alpha1.Event{ - Context: &v1alpha1.EventContext{ - DataContentType: ("application/json"), - }, - Data: []byte("{\"k\": \"v\"}"), + }, + } + filtersLogicalOperator := v1alpha1.AndLogicalOperator + now := time.Now().UTC() + event := &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + Type: "webhook", + SpecVersion: "0.3", + Source: "webhook-gateway", + ID: "1", + Time: metav1.Time{ + Time: time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, time.UTC), }, + DataContentType: "application/json", + Subject: "example-1", }, - want: true, - wantErr: false, - }, - { - name: "number filter, JSON data", - args: args{data: []v1alpha1.DataFilter{ + Data: []byte(`{"k": "v"}`), + } + + pass, err := filterEvent(filter, filtersLogicalOperator, event) + + assert.NoError(t, err) + assert.False(t, pass) + }) + + t.Run("test 'and' filtersLogicalOperator with error", func(t *testing.T) { + // ctx filter: true + // data filter: error (false) + filter := &v1alpha1.EventDependencyFilter{ + Context: &v1alpha1.EventContext{ + Type: "webhook", + Source: "webhook-gateway", + }, + Data: []v1alpha1.DataFilter{ { - Path: "k", + Path: "z", + Type: v1alpha1.JSONTypeString, + Value: []string{"v"}, + }, + }, + } + filtersLogicalOperator := v1alpha1.AndLogicalOperator + now := time.Now().UTC() + event := &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + Type: "webhook", + SpecVersion: "0.3", + Source: "webhook-gateway", + ID: "1", + Time: metav1.Time{ + Time: time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, time.UTC), + }, + DataContentType: "application/json", + Subject: "example-1", + }, + Data: []byte(`{"k": "v"}`), + } + + pass, err := filterEvent(filter, filtersLogicalOperator, event) + + assert.Error(t, err) + assert.False(t, pass) + }) + + t.Run("test 'or' filtersLogicalOperator", func(t *testing.T) { + // ctx filter: true + // data filter: false + filter := &v1alpha1.EventDependencyFilter{ + Context: &v1alpha1.EventContext{ + Type: "webhook", + Source: "webhook-gateway", + }, + Data: []v1alpha1.DataFilter{ + { + Path: "z", + Type: v1alpha1.JSONTypeString, + Value: []string{"v"}, + }, + }, + } + filtersLogicalOperator := v1alpha1.OrLogicalOperator + now := time.Now().UTC() + event := &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + Type: "webhook", + SpecVersion: "0.3", + Source: "webhook-gateway", + 
ID: "1", + Time: metav1.Time{ + Time: time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, time.UTC), + }, + DataContentType: "application/json", + Subject: "example-1", + }, + Data: []byte(`{"k": "v"}`), + } + + pass, err := filterEvent(filter, filtersLogicalOperator, event) + + assert.Error(t, err) + assert.True(t, pass) + }) + + t.Run("test 'or' filtersLogicalOperator with error", func(t *testing.T) { + // ctx filter: true + // data filter: error (false) + filter := &v1alpha1.EventDependencyFilter{ + Context: &v1alpha1.EventContext{ + Type: "webhook", + Source: "webhook-gateway", + }, + Data: []v1alpha1.DataFilter{ + { + Path: "z", + Type: v1alpha1.JSONTypeString, + Value: []string{"v"}, + }, + }, + } + filtersLogicalOperator := v1alpha1.OrLogicalOperator + now := time.Now().UTC() + event := &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + Type: "webhook", + SpecVersion: "0.3", + Source: "webhook-gateway", + ID: "1", + Time: metav1.Time{ + Time: time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, time.UTC), + }, + DataContentType: "application/json", + Subject: "example-1", + }, + Data: []byte(`{"k": "v"}`), + } + + pass, err := filterEvent(filter, filtersLogicalOperator, event) + + assert.Error(t, err) + assert.True(t, pass) + }) + + t.Run("test advanced logic: (A && B) && (C && D)", func(t *testing.T) { + // data filter: A && B == true + // expr filter: C && D == true + filter := &v1alpha1.EventDependencyFilter{ + DataLogicalOperator: v1alpha1.EmptyLogicalOperator, // default AND + Data: []v1alpha1.DataFilter{ + { + // A + Path: "a", + Type: v1alpha1.JSONTypeString, + Value: []string{"x"}, + }, + { + // B + Path: "b", Type: v1alpha1.JSONTypeNumber, - Value: []string{"1.0"}, + Value: []string{"10"}, }, }, - event: &v1alpha1.Event{ - Context: &v1alpha1.EventContext{ - DataContentType: ("application/json"), - }, - Data: []byte("{\"k\": \"1.0\"}"), - }}, - want: true, - wantErr: false, - }, - { - name: "comparator filter GreaterThan return true, JSON data", - args: args{data: []v1alpha1.DataFilter{ - { - Path: "k", - Type: v1alpha1.JSONTypeNumber, - Value: []string{"1.0"}, - Comparator: ">", - }, - }, - event: &v1alpha1.Event{ - Context: &v1alpha1.EventContext{ - DataContentType: ("application/json"), - }, - Data: []byte("{\"k\": \"2.0\"}"), - }}, - want: true, - wantErr: false, - }, - { - name: "comparator filter LessThanOrEqualTo return false, JSON data", - args: args{data: []v1alpha1.DataFilter{ - { - Path: "k", - Type: v1alpha1.JSONTypeNumber, - Value: []string{"1.0"}, - Comparator: "<=", - }, - }, - event: &v1alpha1.Event{ - Context: &v1alpha1.EventContext{ - DataContentType: ("application/json"), - }, - Data: []byte("{\"k\": \"2.0\"}"), - }}, - want: false, - wantErr: false, - }, - { - name: "comparator filter NotEqualTo, JSON data", - args: args{data: []v1alpha1.DataFilter{ - { - Path: "k", - Type: v1alpha1.JSONTypeNumber, - Value: []string{"1.0"}, - Comparator: "!=", - }, - }, - event: &v1alpha1.Event{ - Context: &v1alpha1.EventContext{ - DataContentType: ("application/json"), + ExprLogicalOperator: v1alpha1.EmptyLogicalOperator, // default AND + Exprs: []v1alpha1.ExprFilter{ + { + // C + Expr: `d == "hello world"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "c.d", + Name: "d", + }, }, - Data: []byte("{\"k\": \"1.0\"}"), - }}, - want: false, - wantErr: false, - }, - { - name: "comparator filter EqualTo, JSON data", - args: args{data: []v1alpha1.DataFilter{ - { - Path: "k", - Type: v1alpha1.JSONTypeNumber, - Value: []string{"5.0"}, - Comparator: "=", - }, 
- }, - event: &v1alpha1.Event{ - Context: &v1alpha1.EventContext{ - DataContentType: ("application/json"), + }, + { + // D + Expr: `e == false`, + Fields: []v1alpha1.PayloadField{ + { + Path: "c.e", + Name: "e", + }, }, - Data: []byte("{\"k\": \"5.0\"}"), - }}, - want: true, - wantErr: false, - }, - { - name: "comparator filter empty, JSON data", - args: args{data: []v1alpha1.DataFilter{ + }, + }, + } + filtersLogicalOperator := v1alpha1.EmptyLogicalOperator // default AND + now := time.Now().UTC() + event := &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + Time: metav1.Time{ + Time: time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, time.UTC), + }, + }, + Data: []byte(`{ "a": "x", "b": 10, "c": { "d": "hello world", "e": false } }`), + } + + pass, err := filterEvent(filter, filtersLogicalOperator, event) + + assert.NoError(t, err) + assert.True(t, pass) + }) + + t.Run("test advanced logic: (A && B) && (C && D) with error", func(t *testing.T) { + // data filter: A && B == error (false) + // expr filter: C && D == true + filter := &v1alpha1.EventDependencyFilter{ + DataLogicalOperator: v1alpha1.EmptyLogicalOperator, // default AND + Data: []v1alpha1.DataFilter{ { - Path: "k", + // A + Path: "z", + Type: v1alpha1.JSONTypeString, + Value: []string{"x"}, + }, + { + // B + Path: "b", Type: v1alpha1.JSONTypeNumber, - Value: []string{"10.0"}, + Value: []string{"10"}, }, }, - event: &v1alpha1.Event{ - Context: &v1alpha1.EventContext{ - DataContentType: ("application/json"), - }, - Data: []byte("{\"k\": \"10.0\"}"), - }}, - want: true, - wantErr: false, - }, - { - name: "multiple filters, nested JSON data", - args: args{ - data: []v1alpha1.DataFilter{ - { - Path: "k", - Type: v1alpha1.JSONTypeString, - Value: []string{"v"}, - }, - { - Path: "k1.k", - Type: v1alpha1.JSONTypeNumber, - Value: []string{"3.14"}, + ExprLogicalOperator: v1alpha1.EmptyLogicalOperator, // default AND + Exprs: []v1alpha1.ExprFilter{ + { + // C + Expr: `d == "hello world"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "c.d", + Name: "d", + }, }, - { - Path: "k1.k2", - Type: v1alpha1.JSONTypeString, - Value: []string{"hello,world", "hello there"}, + }, + { + // D + Expr: `e == false"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "c.e", + Name: "e", + }, }, }, - event: &v1alpha1.Event{ - Context: &v1alpha1.EventContext{ - DataContentType: ("application/json"), + }, + } + filtersLogicalOperator := v1alpha1.EmptyLogicalOperator // default AND + now := time.Now().UTC() + event := &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + Time: metav1.Time{ + Time: time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, time.UTC), + }, + }, + Data: []byte(`{ "a": "x", "b": 10, "c": { "d": "hello world", "e": false } }`), + } + + pass, err := filterEvent(filter, filtersLogicalOperator, event) + + assert.Error(t, err) + assert.False(t, pass) + }) + + t.Run("test advanced logic: (A && B) || (C && D)", func(t *testing.T) { + // data filter: A && B == true + // expr filter: C && D == false + filter := &v1alpha1.EventDependencyFilter{ + DataLogicalOperator: v1alpha1.AndLogicalOperator, + Data: []v1alpha1.DataFilter{ + { + // A + Path: "a", + Type: v1alpha1.JSONTypeString, + Value: []string{"x"}, + }, + { + // B + Path: "b", + Type: v1alpha1.JSONTypeNumber, + Value: []string{"10"}, + }, + }, + ExprLogicalOperator: v1alpha1.AndLogicalOperator, + Exprs: []v1alpha1.ExprFilter{ + { + // C + Expr: `d == "hello world"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "c.d", + Name: "d", + }, }, - Data: []byte("{\"k\": true, 
\"k1\": {\"k\": 3.14, \"k2\": \"hello, world\"}}"), - }}, - want: false, - wantErr: false, - }, - { - name: "string filter Regex, JSON data", - args: args{ - data: []v1alpha1.DataFilter{ - { - Path: "[k,k1.a.#(k2==\"v2\").k2]", - Type: v1alpha1.JSONTypeString, - Value: []string{"\\bv\\b.*\\bv2\\b"}, - Comparator: "=", + }, + { + // D + Expr: `e == true`, + Fields: []v1alpha1.PayloadField{ + { + Path: "c.e", + Name: "e", + }, }, }, - event: &v1alpha1.Event{ - Context: &v1alpha1.EventContext{ - DataContentType: ("application/json"), + }, + } + filtersLogicalOperator := v1alpha1.OrLogicalOperator + now := time.Now().UTC() + event := &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + Time: metav1.Time{ + Time: time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, time.UTC), + }, + }, + Data: []byte(`{ "a": "x", "b": 10, "c": { "d": "hello world", "e": false } }`), + } + + pass, err := filterEvent(filter, filtersLogicalOperator, event) + + assert.NoError(t, err) + assert.True(t, pass) + }) + + t.Run("test advanced logic: (A || B) && (C || D)", func(t *testing.T) { + // data filter: A || B == true + // expr filter: C || D == false + filter := &v1alpha1.EventDependencyFilter{ + DataLogicalOperator: v1alpha1.OrLogicalOperator, + Data: []v1alpha1.DataFilter{ + { + // A + Path: "a", + Type: v1alpha1.JSONTypeString, + Value: []string{"x"}, + }, + { + // B + Path: "b", + Type: v1alpha1.JSONTypeNumber, + Value: []string{"11"}, + }, + }, + ExprLogicalOperator: v1alpha1.OrLogicalOperator, + Exprs: []v1alpha1.ExprFilter{ + { + // C + Expr: `d == "hello world"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "c.d", + Name: "d", + }, }, - Data: []byte("{\"k\": \"v\", \"k1\": {\"a\": [{\"k2\": \"v2\"}]}}"), - }, - }, - want: true, - wantErr: false, - }, - { - name: "string filter Regex2, JSON data", - args: args{ - data: []v1alpha1.DataFilter{ - { - Path: "[k,k1.a.#(k2==\"v2\").k2,,k1.a.#(k2==\"v3\").k2]", - Type: v1alpha1.JSONTypeString, - Value: []string{"(\\bz\\b.*\\bv2\\b)|(\\bv\\b.*(\\bv2\\b.*\\bv3\\b))"}, + }, + { + // D + Expr: `e == true`, + Fields: []v1alpha1.PayloadField{ + { + Path: "c.e", + Name: "e", + }, }, }, - event: &v1alpha1.Event{ - Context: &v1alpha1.EventContext{ - DataContentType: ("application/json"), + }, + } + filtersLogicalOperator := v1alpha1.AndLogicalOperator + now := time.Now().UTC() + event := &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + Time: metav1.Time{ + Time: time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, time.UTC), + }, + }, + Data: []byte(`{ "a": "x", "b": 10, "c": { "d": "hello world", "e": false } }`), + } + + pass, err := filterEvent(filter, filtersLogicalOperator, event) + + assert.NoError(t, err) + assert.True(t, pass) + }) + + t.Run("filtersLogicalOperator == 'or' with only a subset of filters specified", func(t *testing.T) { + filter := &v1alpha1.EventDependencyFilter{ + Exprs: []v1alpha1.ExprFilter{ + { + Expr: `A == "not-valid"`, // this will evaluate to false + Fields: []v1alpha1.PayloadField{ + { + Path: "a.b", + Name: "A", + }, }, - Data: []byte("{\"k\": \"v\", \"k1\": {\"a\": [{\"k2\": \"v2\"}, {\"k2\": \"v3\"}]}}"), }, }, - want: true, - wantErr: false, - }, - { - name: "string filter base64, uppercase template", - args: args{data: []v1alpha1.DataFilter{ + Data: []v1alpha1.DataFilter{ // these evaluate to false { - Path: "k", - Type: v1alpha1.JSONTypeString, - Value: []string{"HELLO WORLD"}, - Template: "{{ b64dec .Input | upper }}", + Path: "a.d.e.f", + Type: "string", + Value: []string{"not-valid"}, + }, + { + Path: 
"a.h.i", + Type: "string", + Value: []string{"not-valid", "not-valid-2"}, }, }, - event: &v1alpha1.Event{ - Context: &v1alpha1.EventContext{ - DataContentType: ("application/json"), - }, - Data: []byte("{\"k\": \"aGVsbG8gd29ybGQ=\"}"), - }}, - want: true, - wantErr: false, - }, - { - name: "string filter base64 template", - args: args{data: []v1alpha1.DataFilter{ - { - Path: "k", - Type: v1alpha1.JSONTypeNumber, - Value: []string{"3.13"}, - Comparator: ">", - Template: "{{ b64dec .Input }}", - }, - }, - event: &v1alpha1.Event{ - Context: &v1alpha1.EventContext{ - DataContentType: ("application/json"), + } + + eventDataBytes, err := json.Marshal(map[string]interface{}{ + "a": map[string]interface{}{ + "b": "c", + "d": map[string]interface{}{ + "e": map[string]interface{}{ + "f": "g", }, - Data: []byte("{\"k\": \"My4xNA==\"}"), // 3.14 - }}, - want: true, - wantErr: false, - }, - { - name: "string filter base64 template, comparator not equal", - args: args{data: []v1alpha1.DataFilter{ - { - Path: "k", - Type: v1alpha1.JSONTypeString, - Value: []string{"hello world"}, - Template: "{{ b64dec .Input }}", - Comparator: "!=", - }, - }, - event: &v1alpha1.Event{ - Context: &v1alpha1.EventContext{ - DataContentType: ("application/json"), + }, + "h": map[string]interface{}{ + "i": "j", + }, + }, + }) + + assert.NoError(t, err) + + // should return false because the two filters above evaluate to false + filtersLogicalOperator := v1alpha1.OrLogicalOperator + + now := time.Now().UTC() + event := &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + Time: metav1.Time{ + Time: time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, time.UTC), + }, + }, + Data: eventDataBytes, + } + + pass, err := filterEvent(filter, filtersLogicalOperator, event) + + assert.NoError(t, err) + assert.False(t, pass) + }) + + t.Run("test advanced logic: (A || B) || (C || D)", func(t *testing.T) { + // data filter: A || B == false + // expr filter: C || D == true + filter := &v1alpha1.EventDependencyFilter{ + DataLogicalOperator: v1alpha1.OrLogicalOperator, + Data: []v1alpha1.DataFilter{ + { + // A + Path: "a", + Type: v1alpha1.JSONTypeString, + Value: []string{"y"}, + }, + { + // B + Path: "b", + Type: v1alpha1.JSONTypeNumber, + Value: []string{"11"}, + }, + }, + ExprLogicalOperator: v1alpha1.OrLogicalOperator, + Exprs: []v1alpha1.ExprFilter{ + { + // C + Expr: `d == "hello world"`, + Fields: []v1alpha1.PayloadField{ + { + Path: "c.d", + Name: "d", + }, }, - Data: []byte("{\"k\": \"aGVsbG8gd29ybGQ\"}"), - }}, - want: true, - wantErr: false, - }, - { - name: "string filter base64 template, regex", - args: args{data: []v1alpha1.DataFilter{ - { - Path: "k", - Type: v1alpha1.JSONTypeString, - Value: []string{"world$"}, - Template: "{{ b64dec .Input }}", - }, - }, - event: &v1alpha1.Event{ - Context: &v1alpha1.EventContext{ - DataContentType: ("application/json"), + }, + { + // D + Expr: `e == false`, + Fields: []v1alpha1.PayloadField{ + { + Path: "c.e", + Name: "e", + }, }, - Data: []byte("{\"k\": \"aGVsbG8gd29ybGQ=\"}"), - }}, - want: true, - wantErr: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := filterData(tt.args.data, tt.args.event) - if (err != nil) != tt.wantErr { - t.Errorf("filterData() error = %v, wantErr %v", err, tt.wantErr) - return - } - if got != tt.want { - t.Errorf("filterData() = %v, want %v", got, tt.want) - } - }) - } -} + }, + }, + } + filtersLogicalOperator := v1alpha1.OrLogicalOperator + now := time.Now().UTC() + event := &v1alpha1.Event{ + Context: 
&v1alpha1.EventContext{ + Time: metav1.Time{ + Time: time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, time.UTC), + }, + }, + Data: []byte(`{ "a": "x", "b": 10, "c": { "d": "hello world", "e": false } }`), + } -func TestFilterTime(t *testing.T) { - now := time.Now().UTC() - eventTimes := [6]time.Time{ - time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, time.UTC), - time.Date(now.Year(), now.Month(), now.Day(), 4, 5, 6, 0, time.UTC), - time.Date(now.Year(), now.Month(), now.Day(), 8, 9, 10, 0, time.UTC), - time.Date(now.Year(), now.Month(), now.Day(), 12, 13, 14, 0, time.UTC), - time.Date(now.Year(), now.Month(), now.Day(), 16, 17, 18, 0, time.UTC), - time.Date(now.Year(), now.Month(), now.Day(), 20, 21, 22, 0, time.UTC), - } - - time1 := eventTimes[2].Format("15:04:05") - time2 := eventTimes[4].Format("15:04:05") - - tests := []struct { - name string - timeFilter *v1alpha1.TimeFilter - results [6]bool - }{ - { - name: "no filter", - timeFilter: nil, - results: [6]bool{true, true, true, true, true, true}, - // With no filter, any event time should pass - }, - { - name: "start < stop", - timeFilter: &v1alpha1.TimeFilter{ - Start: time1, - Stop: time2, - }, - results: [6]bool{false, false, true, true, false, false}, - // ~~~~~~~~~~ - // [time1 , time2) - }, - { - name: "stop < start", - timeFilter: &v1alpha1.TimeFilter{ - Start: time2, - Stop: time1, - }, - results: [6]bool{true, true, false, false, true, true}, - // ~~~~~~~~~~ ~~~~~~~~~~ - // [ , time1) [time2 , ) - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - for i, eventTime := range eventTimes { - result, err := filterTime(test.timeFilter, eventTime) - assert.Nil(t, err) - assert.Equal(t, test.results[i], result) - } - }) - } -} + pass, err := filterEvent(filter, filtersLogicalOperator, event) -func TestFilterEvent(t *testing.T) { - now := time.Now().UTC() - eventTime := time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, time.UTC) - - filter := v1alpha1.EventDependencyFilter{ - Time: &v1alpha1.TimeFilter{ - Start: "09:09:09", - Stop: "19:19:19", - }, - Context: &v1alpha1.EventContext{ - Type: "webhook", - Source: "webhook-gateway", - }, - Data: []v1alpha1.DataFilter{ - { - Path: "k", - Type: v1alpha1.JSONTypeString, - Value: []string{"v"}, - }, - }, - } - event := &v1alpha1.Event{ - Context: &v1alpha1.EventContext{ - Type: "webhook", - SpecVersion: "0.3", - Source: "webhook-gateway", - ID: "1", - Time: metav1.Time{Time: eventTime}, - DataContentType: ("application/json"), - Subject: ("example-1"), - }, - Data: []byte("{\"k\": \"v\"}"), - } - - valid, err := filterEvent(&filter, event) - assert.Nil(t, err) - assert.Equal(t, valid, true) -} + assert.NoError(t, err) + assert.True(t, pass) + }) -func TestExprFilter(t *testing.T) { - tests := []struct { - event *v1alpha1.Event - filters []v1alpha1.ExprFilter - result bool - err error - }{ - { - event: &v1alpha1.Event{ - Data: []byte(`{"a": "b"}`), + t.Run("test advanced logic: (A && B) || (C || D)", func(t *testing.T) { + // data filter: A && B == true + // expr filter: C || D == false + filter := &v1alpha1.EventDependencyFilter{ + DataLogicalOperator: v1alpha1.AndLogicalOperator, + Data: []v1alpha1.DataFilter{ + { + // A + Path: "a", + Type: v1alpha1.JSONTypeString, + Value: []string{"x"}, + }, + { + // B + Path: "b", + Type: v1alpha1.JSONTypeNumber, + Value: []string{"10"}, + }, }, - filters: []v1alpha1.ExprFilter{ + ExprLogicalOperator: v1alpha1.OrLogicalOperator, + Exprs: []v1alpha1.ExprFilter{ { - Expr: `a == "b"`, + // C + 
Expr: `d == "hello everybody"`, Fields: []v1alpha1.PayloadField{ { - Path: "a", - Name: "a", + Path: "c.d", + Name: "d", + }, + }, + }, + { + // D + Expr: `e == true`, + Fields: []v1alpha1.PayloadField{ + { + Path: "c.e", + Name: "e", }, }, }, }, - result: true, - err: nil, - }, - { - event: &v1alpha1.Event{ - Data: []byte(`{"a": {"b": "c"}}`), + } + filtersLogicalOperator := v1alpha1.OrLogicalOperator + now := time.Now().UTC() + event := &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + Time: metav1.Time{ + Time: time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, time.UTC), + }, + }, + Data: []byte(`{ "a": "x", "b": 10, "c": { "d": "hello world", "e": false } }`), + } + + pass, err := filterEvent(filter, filtersLogicalOperator, event) + + assert.NoError(t, err) + assert.True(t, pass) + }) + + t.Run("test advanced logic: (A || B) || (C && D)", func(t *testing.T) { + // data filter: A || B == true + // expr filter: C && D == false + filter := &v1alpha1.EventDependencyFilter{ + DataLogicalOperator: v1alpha1.OrLogicalOperator, + Data: []v1alpha1.DataFilter{ + { + // A + Path: "a", + Type: v1alpha1.JSONTypeString, + Value: []string{"x"}, + }, + { + // B + Path: "b", + Type: v1alpha1.JSONTypeNumber, + Value: []string{"11"}, + }, }, - filters: []v1alpha1.ExprFilter{ + ExprLogicalOperator: v1alpha1.AndLogicalOperator, + Exprs: []v1alpha1.ExprFilter{ { - Expr: `b == "b"`, + // C + Expr: `d == "hello everybody"`, Fields: []v1alpha1.PayloadField{ { - Path: "a.b", - Name: "b", + Path: "c.d", + Name: "d", + }, + }, + }, + { + // D + Expr: `e == false`, + Fields: []v1alpha1.PayloadField{ + { + Path: "c.e", + Name: "e", }, }, }, }, - result: false, - err: nil, - }, - { - event: &v1alpha1.Event{ - Data: []byte(`{"a": {"b": "c"}}`), + } + filtersLogicalOperator := v1alpha1.OrLogicalOperator + now := time.Now().UTC() + event := &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + Time: metav1.Time{ + Time: time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, time.UTC), + }, }, - filters: []v1alpha1.ExprFilter{ + Data: []byte(`{ "a": "x", "b": 10, "c": { "d": "hello world", "e": false } }`), + } + + pass, err := filterEvent(filter, filtersLogicalOperator, event) + + assert.NoError(t, err) + assert.True(t, pass) + }) + + t.Run("test advanced logic: (A && B) && (C || D)", func(t *testing.T) { + // data filter: A && B == false + // expr filter: C || D == false + filter := &v1alpha1.EventDependencyFilter{ + DataLogicalOperator: v1alpha1.AndLogicalOperator, + Data: []v1alpha1.DataFilter{ + { + // A + Path: "a", + Type: v1alpha1.JSONTypeString, + Value: []string{"x"}, + }, + { + // B + Path: "b", + Type: v1alpha1.JSONTypeNumber, + Value: []string{"11"}, + }, + }, + ExprLogicalOperator: v1alpha1.OrLogicalOperator, + Exprs: []v1alpha1.ExprFilter{ { - Expr: `b == "b"`, + // C + Expr: `d == "hello everybody"`, Fields: []v1alpha1.PayloadField{ { - Path: "a.b", - Name: "b", + Path: "c.d", + Name: "d", }, }, }, { - Expr: `b == "c"`, + // D + Expr: `e == true`, Fields: []v1alpha1.PayloadField{ { - Path: "a.b", - Name: "b", + Path: "c.e", + Name: "e", }, }, }, }, - result: true, - err: nil, - }, - { - event: &v1alpha1.Event{ - Data: []byte(`{"a": {"b": 2}}`), + } + filtersLogicalOperator := v1alpha1.AndLogicalOperator + now := time.Now().UTC() + event := &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + Time: metav1.Time{ + Time: time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, time.UTC), + }, + }, + Data: []byte(`{ "a": "x", "b": 10, "c": { "d": "hello world", "e": false } }`), + } 
+ + pass, err := filterEvent(filter, filtersLogicalOperator, event) + + assert.NoError(t, err) + assert.False(t, pass) + }) + + t.Run("test advanced logic: (A || B) && (C && D)", func(t *testing.T) { + // data filter: A || B == true + // expr filter: C && D == error (false) + filter := &v1alpha1.EventDependencyFilter{ + DataLogicalOperator: v1alpha1.OrLogicalOperator, + Data: []v1alpha1.DataFilter{ + { + // A + Path: "a", + Type: v1alpha1.JSONTypeString, + Value: []string{"x"}, + }, + { + // B + Path: "b", + Type: v1alpha1.JSONTypeNumber, + Value: []string{"10"}, + }, }, - filters: []v1alpha1.ExprFilter{ + ExprLogicalOperator: v1alpha1.AndLogicalOperator, + Exprs: []v1alpha1.ExprFilter{ { - Expr: `b == 2`, + // C + Expr: `f == "hello world"`, Fields: []v1alpha1.PayloadField{ { - Path: "a.b", - Name: "b", + Path: "c.f", + Name: "f", + }, + }, + }, + { + // D + Expr: `e == false`, + Fields: []v1alpha1.PayloadField{ + { + Path: "c.e", + Name: "e", }, }, }, }, - result: true, - err: nil, - }, - } + } + filtersLogicalOperator := v1alpha1.AndLogicalOperator + now := time.Now().UTC() + event := &v1alpha1.Event{ + Context: &v1alpha1.EventContext{ + Time: metav1.Time{ + Time: time.Date(now.Year(), now.Month(), now.Day(), 16, 36, 34, 0, time.UTC), + }, + }, + Data: []byte(`{ "a": "x", "b": 10, "c": { "d": "hello world", "e": false } }`), + } + + pass, err := filterEvent(filter, filtersLogicalOperator, event) - for _, test := range tests { - result, err := filterExpr(test.filters, test.event) - assert.Equal(t, test.err, err) - assert.Equal(t, test.result, result) - } + assert.Error(t, err) + assert.False(t, pass) + }) } diff --git a/sensors/dependencies/filter_time_test.go b/sensors/dependencies/filter_time_test.go new file mode 100644 index 0000000000..a17f6dca57 --- /dev/null +++ b/sensors/dependencies/filter_time_test.go @@ -0,0 +1,83 @@ +/* +Copyright 2018 BlackRock, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package dependencies + +import ( + "testing" + "time" + + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" + "github.com/stretchr/testify/assert" +) + +func TestFilterTime(t *testing.T) { + now := time.Now().UTC() + eventTimes := [6]time.Time{ + time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, time.UTC), + time.Date(now.Year(), now.Month(), now.Day(), 4, 5, 6, 0, time.UTC), + time.Date(now.Year(), now.Month(), now.Day(), 8, 9, 10, 0, time.UTC), + time.Date(now.Year(), now.Month(), now.Day(), 12, 13, 14, 0, time.UTC), + time.Date(now.Year(), now.Month(), now.Day(), 16, 17, 18, 0, time.UTC), + time.Date(now.Year(), now.Month(), now.Day(), 20, 21, 22, 0, time.UTC), + } + + time1 := eventTimes[2].Format("15:04:05") + time2 := eventTimes[4].Format("15:04:05") + + tests := []struct { + name string + timeFilter *v1alpha1.TimeFilter + results [6]bool + }{ + { + name: "no filter", + timeFilter: nil, + results: [6]bool{true, true, true, true, true, true}, + // With no filter, any event time should pass + }, + { + name: "start less than stop", + timeFilter: &v1alpha1.TimeFilter{ + Start: time1, + Stop: time2, + }, + results: [6]bool{false, false, true, true, false, false}, + // ~~~~~~~~~~ + // [time1 , time2) + }, + { + name: "stop less than start", + timeFilter: &v1alpha1.TimeFilter{ + Start: time2, + Stop: time1, + }, + results: [6]bool{true, true, false, false, true, true}, + // ~~~~~~~~~~ ~~~~~~~~~~ + // [ , time1) [time2 , ) + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + for i, eventTime := range eventTimes { + result, err := filterTime(test.timeFilter, eventTime) + assert.Nil(t, err) + assert.Equal(t, test.results[i], result) + } + }) + } +} diff --git a/sensors/dependencies/transform.go b/sensors/dependencies/transform.go new file mode 100644 index 0000000000..da754adc13 --- /dev/null +++ b/sensors/dependencies/transform.go @@ -0,0 +1,199 @@ +package dependencies + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" + cloudevents "github.com/cloudevents/sdk-go/v2" + "github.com/itchyny/gojq" + "github.com/tidwall/gjson" + lua "github.com/yuin/gopher-lua" +) + +func ApplyTransform(event *cloudevents.Event, transform *v1alpha1.EventDependencyTransformer) (*cloudevents.Event, error) { + if transform == nil { + return event, nil + } + if transform.JQ != "" { + return applyJQTransform(event, transform.JQ) + } + if transform.Script != "" { + return applyScriptTransform(event, transform.Script) + } + return event, nil +} + +func applyJQTransform(event *cloudevents.Event, command string) (*cloudevents.Event, error) { + if event == nil { + return nil, fmt.Errorf("nil Event") + } + payload := event.Data() + if payload == nil { + return event, nil + } + var js *json.RawMessage + if err := json.Unmarshal(payload, &js); err != nil { + return nil, err + } + var jsData []byte + jsData, err := json.Marshal(js) + if err != nil { + return nil, err + } + query, err := gojq.Parse(command) + if err != nil { + return nil, err + } + var temp map[string]interface{} + if err = json.Unmarshal(jsData, &temp); err != nil { + return nil, err + } + iter := query.Run(temp) + v, ok := iter.Next() + if !ok { + return nil, fmt.Errorf("no output available from the jq command execution") + } + switch v.(type) { + case map[string]interface{}: + resultContent, err := json.Marshal(v) + if err != nil { + return nil, err + } + if !gjson.ValidBytes(resultContent) { + return nil, fmt.Errorf("jq transformation output is 
not a JSON object") + } + if err = event.SetData(cloudevents.ApplicationJSON, resultContent); err != nil { + return nil, err + } + return event, nil + default: + return nil, fmt.Errorf("jq transformation output must be a JSON object") + } +} + +func applyScriptTransform(event *cloudevents.Event, script string) (*cloudevents.Event, error) { + l := lua.NewState() + defer l.Close() + payload := event.Data() + if payload == nil { + return event, nil + } + var js *json.RawMessage + if err := json.Unmarshal(payload, &js); err != nil { + return nil, err + } + var jsData []byte + jsData, err := json.Marshal(js) + if err != nil { + return nil, err + } + var payloadJson map[string]interface{} + if err = json.Unmarshal(jsData, &payloadJson); err != nil { + return nil, err + } + lEvent := mapToTable(payloadJson) + l.SetGlobal("event", lEvent) + if err = l.DoString(script); err != nil { + return nil, err + } + lv := l.Get(-1) + tbl, ok := lv.(*lua.LTable) + if !ok { + return nil, fmt.Errorf("transformation script output type is not of lua table") + } + result := toGoValue(tbl) + resultJson, err := json.Marshal(result) + if err != nil { + return nil, err + } + if !gjson.Valid(string(resultJson)) { + return nil, fmt.Errorf("script transformation output is not a JSON object") + } + if err := event.SetData(cloudevents.ApplicationJSON, resultJson); err != nil { + return nil, err + } + return event, nil +} + +// MapToTable converts a Go map to a lua table +func mapToTable(m map[string]interface{}) *lua.LTable { + resultTable := &lua.LTable{} + for key, element := range m { + switch t := element.(type) { + case float64: + resultTable.RawSetString(key, lua.LNumber(t)) + case int64: + resultTable.RawSetString(key, lua.LNumber(t)) + case string: + resultTable.RawSetString(key, lua.LString(t)) + case bool: + resultTable.RawSetString(key, lua.LBool(t)) + case []byte: + resultTable.RawSetString(key, lua.LString(string(t))) + case map[string]interface{}: + table := mapToTable(element.(map[string]interface{})) + resultTable.RawSetString(key, table) + case time.Time: + resultTable.RawSetString(key, lua.LNumber(t.Unix())) + case []map[string]interface{}: + sliceTable := &lua.LTable{} + for _, s := range element.([]map[string]interface{}) { + table := mapToTable(s) + sliceTable.Append(table) + } + resultTable.RawSetString(key, sliceTable) + case []interface{}: + sliceTable := &lua.LTable{} + for _, s := range element.([]interface{}) { + switch tt := s.(type) { + case map[string]interface{}: + t := mapToTable(s.(map[string]interface{})) + sliceTable.Append(t) + case float64: + sliceTable.Append(lua.LNumber(tt)) + case string: + sliceTable.Append(lua.LString(tt)) + case bool: + sliceTable.Append(lua.LBool(tt)) + } + } + resultTable.RawSetString(key, sliceTable) + default: + } + } + return resultTable +} + +// toGoValue converts the given LValue to a Go object. 
+func toGoValue(lv lua.LValue) interface{} { + switch v := lv.(type) { + case *lua.LNilType: + return nil + case lua.LBool: + return bool(v) + case lua.LString: + return string(v) + case lua.LNumber: + return float64(v) + case *lua.LTable: + maxn := v.MaxN() + if maxn == 0 { // table + ret := make(map[string]interface{}) + v.ForEach(func(key, value lua.LValue) { + keystr := key.String() + ret[keystr] = toGoValue(value) + }) + return ret + } else { // array + ret := make([]interface{}, 0, maxn) + for i := 1; i <= maxn; i++ { + ret = append(ret, toGoValue(v.RawGetInt(i))) + } + return ret + } + default: + return v + } +} diff --git a/sensors/dependencies/transform_test.go b/sensors/dependencies/transform_test.go new file mode 100644 index 0000000000..bd0ddb4792 --- /dev/null +++ b/sensors/dependencies/transform_test.go @@ -0,0 +1,119 @@ +package dependencies + +import ( + "testing" + + cloudevents "github.com/cloudevents/sdk-go/v2" + "github.com/cloudevents/sdk-go/v2/types" + "github.com/stretchr/testify/assert" +) + +func strptr(s string) *string { + return &s +} + +func TestApplyJQTransform(t *testing.T) { + tests := []struct { + event *cloudevents.Event + result *cloudevents.Event + command string + hasError bool + }{ + { + event: &cloudevents.Event{ + Context: &cloudevents.EventContextV1{ + ID: "123", + Source: types.URIRef{}, + DataContentType: strptr(cloudevents.ApplicationJSON), + Subject: strptr("hello"), + Time: &types.Timestamp{}, + }, + DataEncoded: []byte(`{"a":1,"b":"2"}`), + }, + result: &cloudevents.Event{ + Context: &cloudevents.EventContextV1{ + ID: "123", + Source: types.URIRef{}, + DataContentType: strptr(cloudevents.ApplicationJSON), + Subject: strptr("hello"), + Time: &types.Timestamp{}, + }, + DataEncoded: []byte(`{"a":2,"b":"22"}`), + }, + hasError: false, + command: ".a += 1 | .b *= 2", + }, + } + for _, tt := range tests { + result, err := applyJQTransform(tt.event, tt.command) + if tt.hasError { + assert.NotNil(t, err) + } else { + assert.Nil(t, err) + } + assert.Equal(t, tt.result.Data(), result.Data()) + } +} + +func TestApplyScriptTransform(t *testing.T) { + tests := []struct { + event *cloudevents.Event + result *cloudevents.Event + script string + hasError bool + }{ + { + event: &cloudevents.Event{ + Context: &cloudevents.EventContextV1{ + ID: "123", + Source: types.URIRef{}, + DataContentType: strptr(cloudevents.ApplicationJSON), + Subject: strptr("hello"), + Time: &types.Timestamp{}, + }, + DataEncoded: []byte(`{"a":1,"b":"2","c":{"d":[3]}}`), + }, + result: &cloudevents.Event{ + Context: &cloudevents.EventContextV1{ + ID: "123", + Source: types.URIRef{}, + DataContentType: strptr(cloudevents.ApplicationJSON), + Subject: strptr("hello"), + Time: &types.Timestamp{}, + }, + DataEncoded: []byte(`{"a":1,"b":"2","c":{"d":[4]}}`), + }, + hasError: false, + script: ` +event.c.d[1]=4 +return event +`, + }, + { + event: &cloudevents.Event{ + Context: &cloudevents.EventContextV1{ + ID: "123", + Source: types.URIRef{}, + DataContentType: strptr(cloudevents.ApplicationJSON), + Subject: strptr("hello"), + Time: &types.Timestamp{}, + }, + DataEncoded: []byte(`{"a":1,"b":"2","c":{"d":[3]}}`), + }, + result: nil, + hasError: true, + script: ` +return "hello" +`, + }, + } + for _, tt := range tests { + result, err := applyScriptTransform(tt.event, tt.script) + if tt.hasError { + assert.NotNil(t, err) + } else { + assert.Nil(t, err) + assert.Equal(t, tt.result.Data(), result.Data()) + } + } +} diff --git a/sensors/listener.go b/sensors/listener.go index 11aadb16c8..7fe2ace771 
100644 --- a/sensors/listener.go +++ b/sensors/listener.go @@ -19,7 +19,6 @@ package sensors import ( "context" "fmt" - "math/rand" "strings" "sync" "sync/atomic" @@ -27,22 +26,26 @@ import ( "github.com/Knetic/govaluate" "github.com/antonmedv/expr" - cloudevents "github.com/cloudevents/sdk-go/v2" - "github.com/pkg/errors" - "go.uber.org/zap" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - + "github.com/argoproj/argo-events/codefresh" "github.com/argoproj/argo-events/common" "github.com/argoproj/argo-events/common/leaderelection" "github.com/argoproj/argo-events/common/logging" "github.com/argoproj/argo-events/eventbus" - eventbusdriver "github.com/argoproj/argo-events/eventbus/driver" + eventbuscommon "github.com/argoproj/argo-events/eventbus/common" apicommon "github.com/argoproj/argo-events/pkg/apis/common" "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" sensordependencies "github.com/argoproj/argo-events/sensors/dependencies" sensortriggers "github.com/argoproj/argo-events/sensors/triggers" + cloudevents "github.com/cloudevents/sdk-go/v2" + "github.com/pkg/errors" + cronlib "github.com/robfig/cron/v3" + "go.uber.org/ratelimit" + "go.uber.org/zap" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +var rateLimiters = make(map[string]ratelimit.Limiter) + func subscribeOnce(subLock *uint32, subscribe func()) { // acquire subLock if not already held if !atomic.CompareAndSwapUint32(subLock, 0, 1) { @@ -52,85 +55,126 @@ func subscribeOnce(subLock *uint32, subscribe func()) { subscribe() } -func (sensorCtx *SensorContext) getGroupAndClientID(depExpression string) (string, string) { - // Generate clientID with hash code - hashKey := fmt.Sprintf("%s-%s", sensorCtx.sensor.Name, depExpression) - s1 := rand.NewSource(time.Now().UnixNano()) - r1 := rand.New(s1) - hashVal := common.Hasher(hashKey) - group := fmt.Sprintf("client-%v", hashVal) - clientID := fmt.Sprintf("client-%v-%v", hashVal, r1.Intn(100)) - return group, clientID -} - func (sensorCtx *SensorContext) Start(ctx context.Context) error { log := logging.FromContext(ctx) - custerName := fmt.Sprintf("%s-sensor-%s", sensorCtx.sensor.Namespace, sensorCtx.sensor.Name) - elector, err := leaderelection.NewEventBusElector(ctx, *sensorCtx.eventBusConfig, custerName, int(sensorCtx.sensor.Spec.GetReplicas())) + clusterName := fmt.Sprintf("%s-sensor-%s", sensorCtx.sensor.Namespace, sensorCtx.sensor.Name) + replicas := int(sensorCtx.sensor.Spec.GetReplicas()) + leasename := fmt.Sprintf("sensor-%s", sensorCtx.sensor.Name) + + // sensor for kafka eventbus can be scaled horizontally, + // therefore does not require an elector + if sensorCtx.eventBusConfig.Kafka != nil { + return sensorCtx.listenEvents(ctx) + } + + elector, err := leaderelection.NewElector(ctx, *sensorCtx.eventBusConfig, clusterName, replicas, sensorCtx.sensor.Namespace, leasename, sensorCtx.hostname) if err != nil { log.Errorw("failed to get an elector", zap.Error(err)) return err } + elector.RunOrDie(ctx, leaderelection.LeaderCallbacks{ OnStartedLeading: func(ctx context.Context) { if err := sensorCtx.listenEvents(ctx); err != nil { - log.Errorw("failed to start", zap.Error(err)) + log.Fatalw("failed to start", zap.Error(err)) } }, OnStoppedLeading: func() { - log.Infof("leader lost: %s", sensorCtx.hostname) + log.Fatalf("leader lost: %s", sensorCtx.hostname) }, }) + return nil } +func initRateLimiter(trigger v1alpha1.Trigger) { + duration := time.Second + if trigger.RateLimit != nil { + switch trigger.RateLimit.Unit { + case v1alpha1.Minute: + duration = time.Minute + case 
v1alpha1.Hour: + duration = time.Hour + } + rateLimiters[trigger.Template.Name] = ratelimit.New(int(trigger.RateLimit.RequestsPerUnit), ratelimit.Per(duration)) + } else { + rateLimiters[trigger.Template.Name] = ratelimit.NewUnlimited() + } +} + // listenEvents watches and handles events received from the gateway. func (sensorCtx *SensorContext) listenEvents(ctx context.Context) error { logger := logging.FromContext(ctx) sensor := sensorCtx.sensor - // Get a mapping of dependencyExpression: []triggers - triggerMapping := make(map[string][]v1alpha1.Trigger) - for _, trigger := range sensor.Spec.Triggers { - depExpr, err := sensorCtx.getDependencyExpression(ctx, trigger) - if err != nil { - logger.Errorw("failed to get dependency expression", zap.Error(err)) - return err - } - triggers, ok := triggerMapping[depExpr] - if !ok { - triggers = []v1alpha1.Trigger{} - } - triggers = append(triggers, trigger) - triggerMapping[depExpr] = triggers - } depMapping := make(map[string]v1alpha1.EventDependency) for _, d := range sensor.Spec.Dependencies { depMapping[d.Name] = d } - cctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithCancel(ctx) + defer cancel() + ebDriver, err := eventbus.GetSensorDriver(logging.WithLogger(ctx, logger), *sensorCtx.eventBusConfig, sensorCtx.sensor, sensorCtx.hostname) + if err != nil { + return err + } + err = common.DoWithRetry(&common.DefaultBackoff, func() error { + return ebDriver.Initialize() + }) + if err != nil { + return err + } + wg := &sync.WaitGroup{} - for k, v := range triggerMapping { + for _, t := range sensor.Spec.Triggers { + initRateLimiter(t) wg.Add(1) - go func(depExpression string, triggers []v1alpha1.Trigger) { + go func(trigger v1alpha1.Trigger) { + triggerLogger := logger.With(logging.LabelTriggerName, trigger.Template.Name) + defer wg.Done() - // Calculate dependencies of each group of triggers. + depExpression, err := sensorCtx.getDependencyExpression(ctx, trigger) + if err != nil { + triggerLogger.Errorw("failed to get dependency expression", zap.Error(err)) + sensorCtx.cfClient.ReportError( + errors.Wrap(err, "failed to get dependency expression"), + codefresh.ErrorContext{ + ObjectMeta: sensor.ObjectMeta, + TypeMeta: sensor.TypeMeta, + }, + ) + return + } + // Calculate dependencies of each of the triggers. 
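+			// govaluate parses a bare "-" as subtraction, so hyphens in dependency
+			// names are escaped below before the expression is compiled.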
de := strings.ReplaceAll(depExpression, "-", "\\-") expr, err := govaluate.NewEvaluableExpression(de) if err != nil { - logger.Errorw("failed to get new evaluable expression", zap.Error(err)) + triggerLogger.Errorw("failed to get new evaluable expression", zap.Error(err)) + sensorCtx.cfClient.ReportError( + errors.Wrap(err, "failed to get new evaluable expression"), + codefresh.ErrorContext{ + ObjectMeta: sensor.ObjectMeta, + TypeMeta: sensor.TypeMeta, + }, + ) return } depNames := unique(expr.Vars()) - deps := []eventbusdriver.Dependency{} + deps := []eventbuscommon.Dependency{} for _, depName := range depNames { dep, ok := depMapping[depName] if !ok { - logger.Errorf("Dependency expression and dependency list do not match, %s is not found", depName) + triggerLogger.Errorf("Dependency expression and dependency list do not match, %s is not found", depName) + sensorCtx.cfClient.ReportError( + errors.Wrapf(err, "Dependency expression and dependency list do not match, %s is not found", depName), + codefresh.ErrorContext{ + ObjectMeta: sensor.ObjectMeta, + TypeMeta: sensor.TypeMeta, + }, + ) return } - d := eventbusdriver.Dependency{ + d := eventbuscommon.Dependency{ Name: dep.Name, EventSourceName: dep.EventSourceName, EventName: dep.EventName, @@ -138,29 +182,39 @@ func (sensorCtx *SensorContext) listenEvents(ctx context.Context) error { deps = append(deps, d) } - group, clientID := sensorCtx.getGroupAndClientID(depExpression) - ebDriver, err := eventbus.GetDriver(cctx, *sensorCtx.eventBusConfig, sensorCtx.eventBusSubject, clientID) - if err != nil { - logger.Errorw("failed to get eventbus driver", zap.Error(err)) - return - } - triggerNames := []string{} - for _, t := range triggers { - triggerNames = append(triggerNames, t.Template.Name) - } - var conn eventbusdriver.Connection - err = common.Connect(&common.DefaultBackoff, func() error { + var conn eventbuscommon.TriggerConnection + err = common.DoWithRetry(&common.DefaultBackoff, func() error { var err error - conn, err = ebDriver.Connect() + conn, err = ebDriver.Connect(ctx, trigger.Template.Name, depExpression, deps, trigger.AtLeastOnce) + triggerLogger.Debugf("just created connection %v, %+v", &conn, conn) return err }) if err != nil { - logger.Fatalw("failed to connect to event bus", zap.Error(err)) + // report before fatal exit + sensorCtx.cfClient.ReportError( + errors.Wrap(err, "failed to connect to event bus"), + codefresh.ErrorContext{ + ObjectMeta: sensor.ObjectMeta, + TypeMeta: sensor.TypeMeta, + }, + ) + triggerLogger.Fatalw("failed to connect to event bus", zap.Error(err)) return } defer conn.Close() - filterFunc := func(depName string, event cloudevents.Event) bool { + transformFunc := func(depName string, event cloudevents.Event) (*cloudevents.Event, error) { + dep, ok := depMapping[depName] + if !ok { + return nil, fmt.Errorf("dependency %s not found", dep.Name) + } + if dep.Transform == nil { + return &event, nil + } + return sensordependencies.ApplyTransform(&event, dep.Transform) + } + + filterFunc := func(depName string, cloudEvent cloudevents.Event) bool { dep, ok := depMapping[depName] if !ok { return false @@ -168,18 +222,40 @@ func (sensorCtx *SensorContext) listenEvents(ctx context.Context) error { if dep.Filters == nil { return true } - e := convertEvent(event) - result, err := sensordependencies.Filter(e, dep.Filters) + argoEvent := convertEvent(cloudEvent) + + result, err := sensordependencies.Filter(argoEvent, dep.Filters, dep.FiltersLogicalOperator) if err != nil { - logger.Errorw("failed to apply filters", 
zap.Error(err)) - return false + if !result { + triggerLogger.Warnf("Event [%s] discarded due to filtering error: %s", + eventToString(argoEvent), err.Error()) + } else { + triggerLogger.Warnf("Event [%s] passed but with filtering error: %s", + eventToString(argoEvent), err.Error()) + } + sensorCtx.cfClient.ReportError( + errors.Wrap(err, "failed to apply filters"), + codefresh.ErrorContext{ + ObjectMeta: sensor.ObjectMeta, + TypeMeta: sensor.TypeMeta, + }, + ) + } else if !result { + triggerLogger.Debugf("Event [%s] discarded due to filtering", eventToString(argoEvent)) } return result } actionFunc := func(events map[string]cloudevents.Event) { - if err := sensorCtx.triggerActions(cctx, sensor, events, triggers); err != nil { - logger.Errorw("failed to trigger actions", zap.Error(err)) + retryStrategy := trigger.RetryStrategy + if retryStrategy == nil { + retryStrategy = &apicommon.Backoff{Steps: 1} + } + err := common.DoWithRetry(retryStrategy, func() error { + return sensorCtx.triggerActions(ctx, sensor, events, trigger) + }) + if err != nil { + triggerLogger.Warnf("failed to trigger actions, %v", err) } } @@ -187,6 +263,55 @@ func (sensorCtx *SensorContext) listenEvents(ctx context.Context) error { wg1 := &sync.WaitGroup{} closeSubCh := make(chan struct{}) + resetConditionsCh := make(chan struct{}) + var lastResetTime time.Time + if len(trigger.Template.ConditionsReset) > 0 { + for _, c := range trigger.Template.ConditionsReset { + if c.ByTime == nil { + continue + } + cronParser := cronlib.NewParser(cronlib.Minute | cronlib.Hour | cronlib.Dom | cronlib.Month | cronlib.Dow) + opts := []cronlib.Option{ + cronlib.WithParser(cronParser), + cronlib.WithChain(cronlib.Recover(cronlib.DefaultLogger)), + } + nowTime := time.Now() + if c.ByTime.Timezone != "" { + location, err := time.LoadLocation(c.ByTime.Timezone) + if err != nil { + triggerLogger.Errorw("failed to load timezone", zap.Error(err)) + continue + } + opts = append(opts, cronlib.WithLocation(location)) + nowTime = nowTime.In(location) + } + cr := cronlib.New(opts...) 
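+					// Each tick of the schedule below pushes into resetConditionsCh,
+					// which the subscription drains to discard partially-met
+					// trigger conditions.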
+ _, err = cr.AddFunc(c.ByTime.Cron, func() { + resetConditionsCh <- struct{}{} + }) + if err != nil { + triggerLogger.Errorw("failed to add cron schedule", zap.Error(err)) + continue + } + cr.Start() + + triggerLogger.Debugf("just started cron job; entries=%v", cr.Entries()) + + // set lastResetTime (the last time this would've been triggered) + if len(cr.Entries()) > 0 { + prevTriggerTime, err := common.PrevCronTime(c.ByTime.Cron, cronParser, nowTime) + if err != nil { + triggerLogger.Errorw("couldn't get previous cron trigger time", zap.Error(err)) + continue + } + triggerLogger.Infof("previous trigger time: %v", prevTriggerTime) + if prevTriggerTime.After(lastResetTime) { + lastResetTime = prevTriggerTime + } + } + } + } + subscribeFunc := func() { wg1.Add(1) go func() { @@ -194,48 +319,53 @@ func (sensorCtx *SensorContext) listenEvents(ctx context.Context) error { // release the lock when goroutine exits defer atomic.StoreUint32(&subLock, 0) - logger.Infof("started subscribing to events for triggers %s with client %s", fmt.Sprintf("[%s]", strings.Join(triggerNames, " ")), clientID) + triggerLogger.Infof("started subscribing to events for trigger %s with client connection %s", trigger.Template.Name, conn) - err = ebDriver.SubscribeEventSources(cctx, conn, group, closeSubCh, depExpression, deps, filterFunc, actionFunc) + subject := &sensorCtx.eventBusSubject + err = conn.Subscribe(ctx, closeSubCh, resetConditionsCh, lastResetTime, transformFunc, filterFunc, actionFunc, subject) if err != nil { - logger.Errorw("failed to subscribe to eventbus", zap.Any("clientID", clientID), zap.Error(err)) + triggerLogger.Errorw("failed to subscribe to eventbus", zap.Any("connection", conn), zap.Error(err)) + sensorCtx.cfClient.ReportError( + errors.Wrapf(err, "failed to subscribe to eventbus, connection: %v", conn), + codefresh.ErrorContext{ + ObjectMeta: sensor.ObjectMeta, + TypeMeta: sensor.TypeMeta, + }, + ) return } + triggerLogger.Debugf("exiting subscribe goroutine, conn=%+v", conn) }() } subscribeOnce(&subLock, subscribeFunc) - logger.Infof("starting eventbus connection daemon for client %s...", clientID) + triggerLogger.Infof("starting eventbus connection daemon for client %s...", conn) ticker := time.NewTicker(5 * time.Second) defer ticker.Stop() for { select { - case <-cctx.Done(): - logger.Infof("exiting eventbus connection daemon for client %s...", clientID) + case <-ctx.Done(): + triggerLogger.Infof("exiting eventbus connection daemon for client %s...", conn) wg1.Wait() return case <-ticker.C: if conn == nil || conn.IsClosed() { - logger.Info("NATS connection lost, reconnecting...") - // Regenerate the client ID to avoid the issue that NAT server still thinks the client is alive. 
- _, clientID := sensorCtx.getGroupAndClientID(depExpression) - ebDriver, err := eventbus.GetDriver(cctx, *sensorCtx.eventBusConfig, sensorCtx.eventBusSubject, clientID) - if err != nil { - logger.Errorw("failed to get eventbus driver during reconnection", zap.Error(err)) - continue - } - conn, err = ebDriver.Connect() + triggerLogger.Info("EventBus connection lost, reconnecting...") + conn, err = ebDriver.Connect(ctx, trigger.Template.Name, depExpression, deps, trigger.AtLeastOnce) if err != nil { - logger.Errorw("failed to reconnect to eventbus", zap.Any("clientID", clientID), zap.Error(err)) + triggerLogger.Errorw("failed to reconnect to eventbus", zap.Any("connection", conn), zap.Error(err)) continue } - logger.Infow("reconnected to NATS streaming server.", zap.Any("clientID", clientID)) + triggerLogger.Infow("reconnected to EventBus.", zap.Any("connection", conn)) if atomic.LoadUint32(&subLock) == 1 { + triggerLogger.Debug("acquired sublock, instructing trigger to shutdown subscription") closeSubCh <- struct{}{} // give subscription time to close time.Sleep(2 * time.Second) + } else { + triggerLogger.Debug("sublock not acquired") } } @@ -245,7 +375,7 @@ func (sensorCtx *SensorContext) listenEvents(ctx context.Context) error { } } } - }(k, v) + }(t) } logger.Info("Sensor started.") <-ctx.Done() @@ -255,8 +385,7 @@ func (sensorCtx *SensorContext) listenEvents(ctx context.Context) error { return nil } -func (sensorCtx *SensorContext) triggerActions(ctx context.Context, sensor *v1alpha1.Sensor, events map[string]cloudevents.Event, triggers []v1alpha1.Trigger) error { - log := logging.FromContext(ctx) +func (sensorCtx *SensorContext) triggerActions(ctx context.Context, sensor *v1alpha1.Sensor, events map[string]cloudevents.Event, trigger v1alpha1.Trigger) error { eventsMapping := make(map[string]*v1alpha1.Event) depNames := make([]string, 0, len(events)) eventIDs := make([]string, 0, len(events)) @@ -265,16 +394,45 @@ func (sensorCtx *SensorContext) triggerActions(ctx context.Context, sensor *v1al depNames = append(depNames, k) eventIDs = append(eventIDs, v.ID()) } - for _, trigger := range triggers { - if err := sensorCtx.triggerOne(ctx, sensor, trigger, eventsMapping, depNames, eventIDs, log); err != nil { - // Log the error, and let it continue - log.Errorw("failed to execute a trigger", zap.Error(err), zap.String(logging.LabelTriggerName, trigger.Template.Name), - zap.Any("triggeredBy", depNames), zap.Any("triggeredByEvents", eventIDs)) - sensorCtx.metrics.ActionFailed(sensor.Name, trigger.Template.Name) - } else { - sensorCtx.metrics.ActionTriggered(sensor.Name, trigger.Template.Name) - } + if trigger.AtLeastOnce { + // By making this a blocking call, wait to Ack the message + // until this trigger is executed. 
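+		// A failure therefore propagates back to the retrying caller rather
+		// than being logged and dropped.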
+ return sensorCtx.triggerWithRateLimit(ctx, sensor, trigger, eventsMapping, depNames, eventIDs) + } else { + go func() { + err := sensorCtx.triggerWithRateLimit(ctx, sensor, trigger, eventsMapping, depNames, eventIDs) + if err != nil { + // Log the error, and let it continue + logger := logging.FromContext(ctx) + logger.Errorw("Failed to execute a trigger", zap.Error(err), zap.String(logging.LabelTriggerName, trigger.Template.Name)) + } + }() + return nil + } +} + +func (sensorCtx *SensorContext) triggerWithRateLimit(ctx context.Context, sensor *v1alpha1.Sensor, trigger v1alpha1.Trigger, eventsMapping map[string]*v1alpha1.Event, depNames, eventIDs []string) error { + if rl, ok := rateLimiters[trigger.Template.Name]; ok { + rl.Take() } + + log := logging.FromContext(ctx) + if err := sensorCtx.triggerOne(ctx, sensor, trigger, eventsMapping, depNames, eventIDs, log); err != nil { + // Log the error, and let it continue + log.Errorw("Failed to execute a trigger", zap.Error(err), zap.String(logging.LabelTriggerName, trigger.Template.Name), + zap.Any("triggeredBy", depNames), zap.Any("triggeredByEvents", eventIDs)) + sensorCtx.metrics.ActionFailed(sensor.Name, trigger.Template.Name) + sensorCtx.cfClient.ReportError( + errors.Wrapf(err, "failed to execute a trigger { %s: %s, %s: %+q, %s: %+q }", + logging.LabelTriggerName, trigger.Template.Name, "triggeredBy", depNames, "triggeredByEvents", eventIDs), + codefresh.ErrorContext{ + ObjectMeta: sensor.ObjectMeta, + TypeMeta: sensor.TypeMeta, + }, + ) + return err + } + sensorCtx.metrics.ActionTriggered(sensor.Name, trigger.Template.Name) return nil } @@ -283,8 +441,6 @@ func (sensorCtx *SensorContext) triggerOne(ctx context.Context, sensor *v1alpha1 sensorCtx.metrics.ActionDuration(sensor.Name, trigger.Template.Name, float64(time.Since(start)/time.Millisecond)) }(time.Now()) - defer sensorCtx.metrics.ActionDuration(sensor.Name, trigger.Template.Name, float64(time.Since(time.Now())/time.Millisecond)) - if err := sensortriggers.ApplyTemplateParameters(eventsMapping, &trigger); err != nil { log.Errorf("failed to apply template parameters, %v", err) return err @@ -295,7 +451,7 @@ func (sensorCtx *SensorContext) triggerOne(ctx context.Context, sensor *v1alpha1 logger.Debugw("resolving the trigger implementation") triggerImpl := sensorCtx.GetTrigger(ctx, &trigger) if triggerImpl == nil { - return errors.Errorf("invalid trigger %s, could not find an implementation", trigger.Template.Name) + return fmt.Errorf("invalid trigger %s, could not find an implementation", trigger.Template.Name) } logger = logger.With(logging.LabelTriggerType, triggerImpl.GetTriggerType()) @@ -305,7 +461,7 @@ func (sensorCtx *SensorContext) triggerOne(ctx context.Context, sensor *v1alpha1 return err } if obj == nil { - return errors.Errorf("invalid trigger %s, could not fetch the trigger resource", trigger.Template.Name) + return fmt.Errorf("invalid trigger %s, could not fetch the trigger resource", trigger.Template.Name) } logger.Debug("applying resource parameters if any") @@ -315,17 +471,9 @@ func (sensorCtx *SensorContext) triggerOne(ctx context.Context, sensor *v1alpha1 } logger.Debug("executing the trigger resource") - retryStrategy := trigger.RetryStrategy - if retryStrategy == nil { - retryStrategy = &apicommon.Backoff{Steps: 1} - } - var newObj interface{} - if err := common.Connect(retryStrategy, func() error { - var e error - newObj, e = triggerImpl.Execute(ctx, eventsMapping, updatedObj) - return e - }); err != nil { - return errors.Wrap(err, "failed to execute trigger") 
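+	// Retries now wrap the whole action via DoWithRetry in listenEvents, so a
+	// single Execute attempt is made here.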
+ newObj, err := triggerImpl.Execute(ctx, eventsMapping, updatedObj) + if err != nil { + return fmt.Errorf("failed to execute trigger, %w", err) } logger.Debug("trigger resource successfully executed") @@ -333,7 +481,7 @@ func (sensorCtx *SensorContext) triggerOne(ctx context.Context, sensor *v1alpha1 if err := triggerImpl.ApplyPolicy(ctx, newObj); err != nil { return err } - logger.Infow("successfully processed the trigger", + logger.Infow(fmt.Sprintf("Successfully processed trigger '%s'", trigger.Template.Name), zap.Any("triggeredBy", depNames), zap.Any("triggeredByEvents", eventIDs)) return nil } @@ -378,37 +526,10 @@ func (sensorCtx *SensorContext) getDependencyExpression(ctx context.Context, tri key := strings.ReplaceAll(dep.Name, "-", "_") depGroupMapping[key] = dep.Name } - for _, depGroup := range sensor.Spec.DependencyGroups { - key := strings.ReplaceAll(depGroup.Name, "-", "_") - depGroupMapping[key] = fmt.Sprintf("(%s)", strings.Join(depGroup.Dependencies, "&&")) - } depExpression, err = translate(conditions, depGroupMapping) if err != nil { return "", err } - case len(sensor.Spec.DependencyGroups) > 0 && sensor.Spec.DeprecatedCircuit != "" && trigger.Template.DeprecatedSwitch != nil: - // DEPRECATED. - logger.Warn("Circuit and Switch are deprecated, please use \"conditions\".") - temp := "" - sw := trigger.Template.DeprecatedSwitch - switch { - case len(sw.All) > 0: - temp = strings.Join(sw.All, "&&") - case len(sw.Any) > 0: - temp = strings.Join(sw.Any, "||") - default: - return "", errors.New("invalid trigger switch") - } - groupDepExpr := fmt.Sprintf("(%s) && (%s)", sensor.Spec.DeprecatedCircuit, temp) - depGroupMapping := make(map[string]string) - for _, depGroup := range sensor.Spec.DependencyGroups { - key := strings.ReplaceAll(depGroup.Name, "-", "_") - depGroupMapping[key] = fmt.Sprintf("(%s)", strings.Join(depGroup.Dependencies, "&&")) - } - depExpression, err = translate(groupDepExpr, depGroupMapping) - if err != nil { - return "", err - } default: deps := []string{} for _, dep := range sensor.Spec.Dependencies { @@ -416,15 +537,13 @@ func (sensorCtx *SensorContext) getDependencyExpression(ctx context.Context, tri } depExpression = strings.Join(deps, "&&") } - logger.Infof("Dependency expression for trigger %s before simplification: %s", trigger.Template.Name, depExpression) - boolSimplifier, err := common.NewBoolExpression(depExpression) - if err != nil { - logger.Errorw("Invalid dependency expression", zap.Error(err)) - return "", err - } - result := boolSimplifier.GetExpression() - logger.Infof("Dependency expression for trigger %s after simplification: %s", trigger.Template.Name, result) - return result, nil + logger.Infof("Dependency expression for trigger %s: %s", trigger.Template.Name, depExpression) + return depExpression, nil +} + +func eventToString(event *v1alpha1.Event) string { + return fmt.Sprintf("ID '%s', Source '%s', Time '%s', Data '%s'", + event.Context.ID, event.Context.Source, event.Context.Time.Time.Format(time.RFC3339), string(event.Data)) } func convertEvent(event cloudevents.Event) *v1alpha1.Event { diff --git a/sensors/listener_test.go b/sensors/listener_test.go index 855fff5518..2390c3ce2c 100644 --- a/sensors/listener_test.go +++ b/sensors/listener_test.go @@ -30,13 +30,7 @@ var ( fakeTrigger = &v1alpha1.Trigger{ Template: &v1alpha1.TriggerTemplate{ Name: "fake-trigger", - K8s: &v1alpha1.StandardK8STrigger{ - GroupVersionResource: metav1.GroupVersionResource{ - Group: "apps", - Version: "v1", - Resource: "deployments", - }, - }, + K8s: 
&v1alpha1.StandardK8STrigger{}, }, } @@ -88,9 +82,8 @@ func TestGetDependencyExpression(t *testing.T) { sensorCtx := &SensorContext{ sensor: obj, } - expr, err := sensorCtx.getDependencyExpression(context.Background(), *fakeTrigger) + _, err := sensorCtx.getDependencyExpression(context.Background(), *fakeTrigger) assert.NoError(t, err) - assert.Equal(t, "dep1 && dep2", expr) }) t.Run("get complex expression", func(t *testing.T) { @@ -115,18 +108,9 @@ func TestGetDependencyExpression(t *testing.T) { sensorCtx := &SensorContext{ sensor: obj, } - obj.Spec.DependencyGroups = []v1alpha1.DependencyGroup{ - {Name: "group-1", Dependencies: []string{"dep1", "dep1a"}}, - {Name: "group-2", Dependencies: []string{"dep2"}}, - } - obj.Spec.DeprecatedCircuit = "((group-2) || group-1)" trig := fakeTrigger.DeepCopy() - trig.Template.DeprecatedSwitch = &v1alpha1.TriggerSwitch{ - Any: []string{"group-1"}, - } - expr, err := sensorCtx.getDependencyExpression(context.Background(), *trig) + _, err := sensorCtx.getDependencyExpression(context.Background(), *trig) assert.NoError(t, err) - assert.Equal(t, "dep1 && dep1a", expr) }) t.Run("get conditions expression", func(t *testing.T) { @@ -156,14 +140,9 @@ func TestGetDependencyExpression(t *testing.T) { sensorCtx := &SensorContext{ sensor: obj, } - obj.Spec.DependencyGroups = []v1alpha1.DependencyGroup{ - {Name: "group-1", Dependencies: []string{"dep-1", "dep_1a"}}, - {Name: "group_2", Dependencies: []string{"dep-2"}}, - } trig := fakeTrigger.DeepCopy() - trig.Template.Conditions = "group-1 || group_2 || dep-3" - expr, err := sensorCtx.getDependencyExpression(context.Background(), *trig) + trig.Template.Conditions = "dep-1 || dep-1a || dep-3" + _, err := sensorCtx.getDependencyExpression(context.Background(), *trig) assert.NoError(t, err) - assert.Equal(t, "dep-3 || dep-2 || (dep-1 && dep_1a)", expr) }) } diff --git a/sensors/policy/resource-labels_test.go b/sensors/policy/resource-labels_test.go index e17a27ba0a..8860ff65ff 100644 --- a/sensors/policy/resource-labels_test.go +++ b/sensors/policy/resource-labels_test.go @@ -60,11 +60,6 @@ func TestResourceLabels_ApplyPolicy(t *testing.T) { Template: &v1alpha1.TriggerTemplate{ Name: "fake-trigger", K8s: &v1alpha1.StandardK8STrigger{ - GroupVersionResource: metav1.GroupVersionResource{ - Group: "apps", - Resource: "deployments", - Version: "v1", - }, Source: &v1alpha1.ArtifactLocation{ Resource: &artifact, }, @@ -87,9 +82,9 @@ func TestResourceLabels_ApplyPolicy(t *testing.T) { } namespacableClient := client.Resource(schema.GroupVersionResource{ - Resource: trigger.Template.K8s.GroupVersionResource.Resource, - Version: trigger.Template.K8s.GroupVersionResource.Version, - Group: trigger.Template.K8s.GroupVersionResource.Group, + Resource: "deployments", + Version: "v1", + Group: "apps", }) ctx := context.TODO() @@ -127,7 +122,7 @@ func TestResourceLabels_ApplyPolicy(t *testing.T) { }, testFunc: func(err error) { assert.NotNil(t, err) - assert.Equal(t, wait.ErrWaitTimeout.Error(), err.Error()) + assert.True(t, wait.Interrupted(err)) }, }, } diff --git a/sensors/policy/status.go b/sensors/policy/status.go index 6347481f4d..94656cc379 100644 --- a/sensors/policy/status.go +++ b/sensors/policy/status.go @@ -18,8 +18,7 @@ package policy import ( "context" - - "github.com/pkg/errors" + "fmt" ) // StatusPolicy implements the policy for a HTTP trigger @@ -44,5 +43,5 @@ func (hp *StatusPolicy) ApplyPolicy(ctx context.Context) error { return nil } } - return errors.Errorf("policy application resulted in failure. 
http response status %d is not allowed", hp.Status) + return fmt.Errorf("policy application resulted in failure. http response status %d is not allowed", hp.Status) } diff --git a/sensors/trigger.go b/sensors/trigger.go index 5a73bae826..004d5d2c25 100644 --- a/sensors/trigger.go +++ b/sensors/trigger.go @@ -27,11 +27,14 @@ import ( argoworkflow "github.com/argoproj/argo-events/sensors/triggers/argo-workflow" awslambda "github.com/argoproj/argo-events/sensors/triggers/aws-lambda" eventhubs "github.com/argoproj/argo-events/sensors/triggers/azure-event-hubs" + servicebus "github.com/argoproj/argo-events/sensors/triggers/azure-service-bus" customtrigger "github.com/argoproj/argo-events/sensors/triggers/custom-trigger" + "github.com/argoproj/argo-events/sensors/triggers/email" "github.com/argoproj/argo-events/sensors/triggers/http" "github.com/argoproj/argo-events/sensors/triggers/kafka" logtrigger "github.com/argoproj/argo-events/sensors/triggers/log" "github.com/argoproj/argo-events/sensors/triggers/nats" + "github.com/argoproj/argo-events/sensors/triggers/pulsar" "github.com/argoproj/argo-events/sensors/triggers/slack" standardk8s "github.com/argoproj/argo-events/sensors/triggers/standard-k8s" ) @@ -87,6 +90,15 @@ func (sensorCtx *SensorContext) GetTrigger(ctx context.Context, trigger *v1alpha return result } + if trigger.Template.AzureServiceBus != nil { + result, err := servicebus.NewAzureServiceBusTrigger(sensorCtx.sensor, trigger, sensorCtx.azureServiceBusClients, log) + if err != nil { + log.Errorw("failed to new an Azure Service Bus trigger", zap.Error(err)) + return nil + } + return result + } + if trigger.Template.Kafka != nil { result, err := kafka.NewKafkaTrigger(sensorCtx.sensor, trigger, sensorCtx.kafkaProducers, log) if err != nil { @@ -96,6 +108,15 @@ func (sensorCtx *SensorContext) GetTrigger(ctx context.Context, trigger *v1alpha return result } + if trigger.Template.Pulsar != nil { + result, err := pulsar.NewPulsarTrigger(sensorCtx.sensor, trigger, sensorCtx.pulsarProducers, log) + if err != nil { + log.Errorw("failed to new a Pulsar trigger", zap.Error(err)) + return nil + } + return result + } + if trigger.Template.NATS != nil { result, err := nats.NewNATSTrigger(sensorCtx.sensor, trigger, sensorCtx.natsConnections, log) if err != nil { @@ -141,5 +162,13 @@ func (sensorCtx *SensorContext) GetTrigger(ctx context.Context, trigger *v1alpha return result } + if trigger.Template.Email != nil { + result, err := email.NewEmailTrigger(sensorCtx.sensor, trigger, log) + if err != nil { + log.Errorw("failed to new a Email trigger", zap.Error(err)) + return nil + } + return result + } return nil } diff --git a/sensors/triggers/apache-openwhisk/apache-openwhisk.go b/sensors/triggers/apache-openwhisk/apache-openwhisk.go index 25f2d010a4..47064d8f41 100644 --- a/sensors/triggers/apache-openwhisk/apache-openwhisk.go +++ b/sensors/triggers/apache-openwhisk/apache-openwhisk.go @@ -18,10 +18,10 @@ package apache_openwhisk import ( "context" "encoding/json" + "fmt" "net/http" "github.com/apache/openwhisk-client-go/whisk" - "github.com/pkg/errors" "go.uber.org/zap" "github.com/argoproj/argo-events/common" @@ -45,17 +45,17 @@ type TriggerImpl struct { } // NewTriggerImpl returns a new TriggerImpl -func NewTriggerImpl(sensor *v1alpha1.Sensor, trigger *v1alpha1.Trigger, openWhiskClients map[string]*whisk.Client, logger *zap.SugaredLogger) (*TriggerImpl, error) { +func NewTriggerImpl(sensor *v1alpha1.Sensor, trigger *v1alpha1.Trigger, openWhiskClients common.StringKeyedMap[*whisk.Client], logger 
*zap.SugaredLogger) (*TriggerImpl, error) { openwhisktrigger := trigger.Template.OpenWhisk - client, ok := openWhiskClients[trigger.Template.Name] + client, ok := openWhiskClients.Load(trigger.Template.Name) if !ok { logger.Debugw("OpenWhisk trigger value", zap.Any("name", trigger.Template.Name), zap.Any("trigger", *trigger.Template.OpenWhisk)) logger.Infow("instantiating OpenWhisk client", zap.Any("trigger-name", trigger.Template.Name)) config, err := whisk.GetDefaultConfig() if err != nil { - return nil, errors.Wrap(err, "failed to get default configuration") + return nil, fmt.Errorf("failed to get default configuration, %w", err) } config.Host = openwhisktrigger.Host @@ -63,7 +63,7 @@ func NewTriggerImpl(sensor *v1alpha1.Sensor, trigger *v1alpha1.Trigger, openWhis if openwhisktrigger.AuthToken != nil { token, err := common.GetSecretFromVolume(openwhisktrigger.AuthToken) if err != nil { - return nil, errors.Wrap(err, "failed to retrieve auth token") + return nil, fmt.Errorf("failed to retrieve auth token, %w", err) } config.AuthToken = token } @@ -79,10 +79,10 @@ func NewTriggerImpl(sensor *v1alpha1.Sensor, trigger *v1alpha1.Trigger, openWhis client, err = whisk.NewClient(http.DefaultClient, config) if err != nil { - return nil, errors.Wrap(err, "failed to instantiate OpenWhisk client") + return nil, fmt.Errorf("failed to instantiate OpenWhisk client, %w", err) } - openWhiskClients[trigger.Template.Name] = client + openWhiskClients.Store(trigger.Template.Name, client) } return &TriggerImpl{ @@ -108,12 +108,12 @@ func (t *TriggerImpl) FetchResource(ctx context.Context) (interface{}, error) { func (t *TriggerImpl) ApplyResourceParameters(events map[string]*v1alpha1.Event, resource interface{}) (interface{}, error) { fetchedResource, ok := resource.(*v1alpha1.OpenWhiskTrigger) if !ok { - return nil, errors.New("failed to interpret the fetched trigger resource") + return nil, fmt.Errorf("failed to interpret the fetched trigger resource") } resourceBytes, err := json.Marshal(fetchedResource) if err != nil { - return nil, errors.Wrap(err, "failed to marshal the OpenWhisk trigger resource") + return nil, fmt.Errorf("failed to marshal the OpenWhisk trigger resource, %w", err) } parameters := fetchedResource.Parameters if parameters != nil { @@ -123,7 +123,7 @@ func (t *TriggerImpl) ApplyResourceParameters(events map[string]*v1alpha1.Event, } var openwhisktrigger *v1alpha1.OpenWhiskTrigger if err := json.Unmarshal(updatedResourceBytes, &openwhisktrigger); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal the updated OpenWhisk trigger resource after applying resource parameters") + return nil, fmt.Errorf("failed to unmarshal the updated OpenWhisk trigger resource after applying resource parameters, %w", err) } t.Logger.Debugw("applied parameters to the OpenWhisk trigger", zap.Any("name", t.Trigger.Template.Name), zap.Any("trigger", *openwhisktrigger)) @@ -141,7 +141,7 @@ func (t *TriggerImpl) Execute(ctx context.Context, events map[string]*v1alpha1.E openwhisktrigger, ok := resource.(*v1alpha1.OpenWhiskTrigger) if !ok { - return nil, errors.New("failed to interpret the OpenWhisk trigger resource") + return nil, fmt.Errorf("failed to interpret the OpenWhisk trigger resource") } if openwhisktrigger.Payload != nil { @@ -155,7 +155,7 @@ func (t *TriggerImpl) Execute(ctx context.Context, events map[string]*v1alpha1.E response, status, err := t.OpenWhiskClient.Actions.Invoke(openwhisktrigger.ActionName, payload, true, true) if err != nil { - return nil, errors.Wrapf(err, "failed to invoke 
action %s", openwhisktrigger.ActionName) + return nil, fmt.Errorf("failed to invoke action %s, %w", openwhisktrigger.ActionName, err) } t.Logger.Debugw("response for the OpenWhisk action invocation", zap.Any("name", t.Trigger.Template.Name), zap.Any("response", response)) @@ -170,7 +170,7 @@ func (t *TriggerImpl) ApplyPolicy(ctx context.Context, resource interface{}) err } response, ok := resource.(*http.Response) if !ok { - return errors.New("failed to interpret the trigger execution response") + return fmt.Errorf("failed to interpret the trigger execution response") } p := policy.NewStatusPolicy(response.StatusCode, t.Trigger.Policy.Status.GetAllow()) diff --git a/sensors/triggers/argo-workflow/argo-workflow.go b/sensors/triggers/argo-workflow/argo-workflow.go index 178fabeaf2..5505757351 100644 --- a/sensors/triggers/argo-workflow/argo-workflow.go +++ b/sensors/triggers/argo-workflow/argo-workflow.go @@ -1,28 +1,30 @@ /* Copyright 2020 BlackRock, Inc. + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - package argo_workflow import ( "context" "fmt" - "io/ioutil" + "io" "os" "os/exec" "strconv" + "strings" "time" - "github.com/pkg/errors" "go.uber.org/zap" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -54,6 +56,7 @@ type ArgoWorkflowTrigger struct { Logger *zap.SugaredLogger namespableDynamicClient dynamic.NamespaceableResourceInterface + cmdRunner func(cmd *exec.Cmd) error } // NewArgoWorkflowTrigger returns a new Argo workflow trigger @@ -64,6 +67,9 @@ func NewArgoWorkflowTrigger(k8sClient kubernetes.Interface, dynamicClient dynami Sensor: sensor, Trigger: trigger, Logger: logger.With(logging.LabelTriggerType, apicommon.ArgoWorkflowTrigger), + cmdRunner: func(cmd *exec.Cmd) error { + return cmd.Run() + }, } } @@ -82,7 +88,7 @@ func (t *ArgoWorkflowTrigger) FetchResource(ctx context.Context) (interface{}, e func (t *ArgoWorkflowTrigger) ApplyResourceParameters(events map[string]*v1alpha1.Event, resource interface{}) (interface{}, error) { obj, ok := resource.(*unstructured.Unstructured) if !ok { - return nil, errors.New("failed to interpret the trigger resource") + return nil, fmt.Errorf("failed to interpret the trigger resource") } if err := triggers.ApplyResourceParameters(events, t.Trigger.Template.ArgoWorkflow.Parameters, obj); err != nil { return nil, err @@ -101,16 +107,16 @@ func (t *ArgoWorkflowTrigger) Execute(ctx context.Context, events map[string]*v1 obj, ok := resource.(*unstructured.Unstructured) if !ok { - return nil, errors.New("failed to interpret the trigger resource") + return nil, fmt.Errorf("failed to interpret the trigger resource") } name := obj.GetName() if name == "" { if op != v1alpha1.Submit { - return nil, errors.Errorf("failed to execute the workflow %v operation, no name is given", op) + return nil, fmt.Errorf("failed to execute the workflow %v operation, no name is given", op) } if obj.GetGenerateName() == "" { - return nil, errors.New("failed to trigger the workflow, neither name nor generatedName is given") + return nil, 
fmt.Errorf("failed to trigger the workflow, neither name nor generateName is given") } } @@ -118,7 +124,7 @@ func (t *ArgoWorkflowTrigger) Execute(ctx context.Context, events map[string]*v1 if op == v1alpha1.Submit { submittedWFLabels["events.argoproj.io/trigger"] = trigger.Template.Name submittedWFLabels["events.argoproj.io/action-timestamp"] = strconv.Itoa(int(time.Now().UnixNano() / int64(time.Millisecond))) - common.ApplySensorUniquenessLabels(submittedWFLabels, t.Sensor) + common.ApplySensorLabels(submittedWFLabels, t.Sensor) err := common.ApplyEventLabels(submittedWFLabels, events) if err != nil { t.Logger.Info("failed to apply event labels, skipping...") @@ -134,9 +140,9 @@ func (t *ArgoWorkflowTrigger) Execute(ctx context.Context, events map[string]*v1 switch op { case v1alpha1.Submit: - file, err := ioutil.TempFile("", fmt.Sprintf("%s%s", name, obj.GetGenerateName())) + file, err := os.CreateTemp("", fmt.Sprintf("%s%s", name, obj.GetGenerateName())) if err != nil { - return nil, errors.Wrapf(err, "failed to create a temp file for the workflow %s", obj.GetName()) + return nil, fmt.Errorf("failed to create a temp file for the workflow %s, %w", obj.GetName(), err) } defer os.Remove(file.Name()) @@ -156,9 +162,21 @@ func (t *ArgoWorkflowTrigger) Execute(ctx context.Context, events map[string]*v1 } if _, err := file.Write(jObj); err != nil { - return nil, errors.Wrapf(err, "failed to write workflow json %s to the temp file %s", name, file.Name()) + return nil, fmt.Errorf("failed to write workflow json %s to the temp file %s, %w", name, file.Name(), err) } cmd = exec.Command("argo", "-n", namespace, "submit", file.Name()) + case v1alpha1.SubmitFrom: + kind := obj.GetKind() + switch strings.ToLower(kind) { + case "cronworkflow": + kind = "cronwf" + case "workflowtemplate": + kind = "workflowtemplate" + default: + return nil, fmt.Errorf("invalid kind %s", kind) + } + fromArg := fmt.Sprintf("%s/%s", kind, name) + cmd = exec.Command("argo", "-n", namespace, "submit", "--from", fromArg) case v1alpha1.Resubmit: cmd = exec.Command("argo", "-n", namespace, "resubmit", name) case v1alpha1.Resume: @@ -169,14 +187,18 @@ func (t *ArgoWorkflowTrigger) Execute(ctx context.Context, events map[string]*v1 cmd = exec.Command("argo", "-n", namespace, "suspend", name) case v1alpha1.Terminate: cmd = exec.Command("argo", "-n", namespace, "terminate", name) + case v1alpha1.Stop: + cmd = exec.Command("argo", "-n", namespace, "stop", name) default: - return nil, errors.Errorf("unknown operation type %s", string(op)) + return nil, fmt.Errorf("unknown operation type %s", string(op)) } + var errBuff strings.Builder + cmd.Stderr = io.MultiWriter(os.Stderr, &errBuff) cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - if err := cmd.Run(); err != nil { - return nil, errors.Wrapf(err, "failed to execute %s command for workflow %s", string(op), name) + cmd.Args = append(cmd.Args, trigger.Template.ArgoWorkflow.Args...) 
+ if err := t.cmdRunner(cmd); err != nil { + return nil, fmt.Errorf("failed to execute %s command for workflow %s, %w", string(op), name, err) } t.namespableDynamicClient = t.DynamicClient.Resource(schema.GroupVersionResource{ @@ -193,7 +215,7 @@ func (t *ArgoWorkflowTrigger) Execute(ctx context.Context, events map[string]*v1 return nil, err } if len(l.Items) == 0 { - return nil, errors.New("failed to list created workflows for unknown reason") + return nil, fmt.Errorf("failed to list created workflows for unknown reason") } return l.Items[0], nil } @@ -208,7 +230,7 @@ func (t *ArgoWorkflowTrigger) ApplyPolicy(ctx context.Context, resource interfac obj, ok := resource.(*unstructured.Unstructured) if !ok { - return errors.New("failed to interpret the trigger resource") + return fmt.Errorf("failed to interpret the trigger resource") } p := policy.NewResourceLabels(trigger, t.namespableDynamicClient, obj) @@ -218,13 +240,12 @@ func (t *ArgoWorkflowTrigger) ApplyPolicy(ctx context.Context, resource interfac err := p.ApplyPolicy(ctx) if err != nil { - switch err { - case wait.ErrWaitTimeout: + if wait.Interrupted(err) { if trigger.Policy.K8s.ErrorOnBackoffTimeout { - return errors.Errorf("failed to determine status of the triggered resource. setting trigger state as failed") + return fmt.Errorf("failed to determine status of the triggered resource. setting trigger state as failed") } return nil - default: + } else { return err } } diff --git a/sensors/triggers/argo-workflow/argo-workflow_test.go b/sensors/triggers/argo-workflow/argo-workflow_test.go index e49ecb8cbc..458401f13b 100644 --- a/sensors/triggers/argo-workflow/argo-workflow_test.go +++ b/sensors/triggers/argo-workflow/argo-workflow_test.go @@ -17,8 +17,13 @@ package argo_workflow import ( "context" + "fmt" + "os/exec" "testing" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "github.com/argoproj/argo-events/common/logging" apicommon "github.com/argoproj/argo-events/pkg/apis/common" "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" @@ -40,19 +45,17 @@ var sensorObj = &v1alpha1.Sensor{ { Template: &v1alpha1.TriggerTemplate{ Name: "fake-trigger", - K8s: &v1alpha1.StandardK8STrigger{ - GroupVersionResource: metav1.GroupVersionResource{ - Group: "apps", - Version: "v1", - Resource: "deployments", - }, - }, + K8s: &v1alpha1.StandardK8STrigger{}, }, }, }, }, } +var ( + un = newUnstructured("argoproj.io/v1alpha1", "Workflow", "fake", "test") +) + func newUnstructured(apiVersion, kind, namespace, name string) *unstructured.Unstructured { return &unstructured.Unstructured{ Object: map[string]interface{}{ @@ -69,10 +72,9 @@ func newUnstructured(apiVersion, kind, namespace, name string) *unstructured.Uns } } -func getFakeWfTrigger() *ArgoWorkflowTrigger { +func getFakeWfTrigger(operation v1alpha1.ArgoWorkflowOperation) *ArgoWorkflowTrigger { runtimeScheme := runtime.NewScheme() client := dynamicFake.NewSimpleDynamicClient(runtimeScheme) - un := newUnstructured("argoproj.io/v1alpha1", "Workflow", "fake", "test") artifact := apicommon.NewResource(un) trigger := &v1alpha1.Trigger{ Template: &v1alpha1.TriggerTemplate{ @@ -81,12 +83,7 @@ func getFakeWfTrigger() *ArgoWorkflowTrigger { Source: &v1alpha1.ArtifactLocation{ Resource: &artifact, }, - Operation: "Submit", - GroupVersionResource: metav1.GroupVersionResource{ - Group: "argoproj.io", - Version: "v1alpha1", - Resource: "workflows", - }, + Operation: operation, }, }, } @@ -94,7 +91,7 @@ func getFakeWfTrigger() *ArgoWorkflowTrigger { } func TestFetchResource(t 
*testing.T) { - trigger := getFakeWfTrigger() + trigger := getFakeWfTrigger("submit") resource, err := trigger.FetchResource(context.TODO()) assert.Nil(t, err) assert.NotNil(t, resource) @@ -107,3 +104,42 @@ func TestFetchResource(t *testing.T) { func TestApplyResourceParameters(t *testing.T) { } + +func TestExecute(t *testing.T) { + t.Run("passes trigger args as flags to argo command", func(t *testing.T) { + ctx := context.Background() + var actual string + firstArg := "--foo" + secondArg := "--bar" + trigger := storingCmdTrigger(&actual, firstArg, secondArg) + + _, err := namespacedClientFrom(trigger).Namespace(un.GetNamespace()).Create(ctx, un, metav1.CreateOptions{}) + assert.Nil(t, err) + + _, err = trigger.Execute(ctx, nil, un) + assert.Nil(t, err) + + expected := fmt.Sprintf("argo -n %s resume test %s %s", un.GetNamespace(), firstArg, secondArg) + assert.Contains(t, actual, expected) + }) +} + +func storingCmdTrigger(cmdStr *string, wfArgs ...string) *ArgoWorkflowTrigger { + trigger := getFakeWfTrigger("resume") + f := func(cmd *exec.Cmd) error { + *cmdStr = cmd.String() + return nil + } + trigger.cmdRunner = f + trigger.Trigger.Template.ArgoWorkflow.Args = wfArgs + + return trigger +} + +func namespacedClientFrom(trigger *ArgoWorkflowTrigger) dynamic.NamespaceableResourceInterface { + return trigger.DynamicClient.Resource(schema.GroupVersionResource{ + Group: "argoproj.io", + Version: "v1alpha1", + Resource: "workflows", + }) +} diff --git a/sensors/triggers/aws-lambda/aws-lambda.go b/sensors/triggers/aws-lambda/aws-lambda.go index 8ae48e33c5..013f5ea232 100644 --- a/sensors/triggers/aws-lambda/aws-lambda.go +++ b/sensors/triggers/aws-lambda/aws-lambda.go @@ -18,12 +18,13 @@ package aws_lambda import ( "context" "encoding/json" + "fmt" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/lambda" - "github.com/pkg/errors" "go.uber.org/zap" + "github.com/argoproj/argo-events/common" "github.com/argoproj/argo-events/common/logging" commonaws "github.com/argoproj/argo-events/eventsources/common/aws" apicommon "github.com/argoproj/argo-events/pkg/apis/common" @@ -45,17 +46,17 @@ type AWSLambdaTrigger struct { } // NewAWSLambdaTrigger returns a new AWS Lambda context -func NewAWSLambdaTrigger(lambdaClients map[string]*lambda.Lambda, sensor *v1alpha1.Sensor, trigger *v1alpha1.Trigger, logger *zap.SugaredLogger) (*AWSLambdaTrigger, error) { +func NewAWSLambdaTrigger(lambdaClients common.StringKeyedMap[*lambda.Lambda], sensor *v1alpha1.Sensor, trigger *v1alpha1.Trigger, logger *zap.SugaredLogger) (*AWSLambdaTrigger, error) { lambdatrigger := trigger.Template.AWSLambda - lambdaClient, ok := lambdaClients[trigger.Template.Name] + lambdaClient, ok := lambdaClients.Load(trigger.Template.Name) if !ok { - awsSession, err := commonaws.CreateAWSSessionWithCredsInVolume(lambdatrigger.Region, "", lambdatrigger.AccessKey, lambdatrigger.SecretKey) + awsSession, err := commonaws.CreateAWSSessionWithCredsInVolume(lambdatrigger.Region, lambdatrigger.RoleARN, lambdatrigger.AccessKey, lambdatrigger.SecretKey, nil) if err != nil { - return nil, errors.Wrap(err, "failed to create a AWS session") + return nil, fmt.Errorf("failed to create a AWS session, %w", err) } lambdaClient = lambda.New(awsSession, &aws.Config{Region: &lambdatrigger.Region}) - lambdaClients[trigger.Template.Name] = lambdaClient + lambdaClients.Store(trigger.Template.Name, lambdaClient) } return &AWSLambdaTrigger{ @@ -80,7 +81,7 @@ func (t *AWSLambdaTrigger) FetchResource(ctx context.Context) (interface{}, erro func (t 
*AWSLambdaTrigger) ApplyResourceParameters(events map[string]*v1alpha1.Event, resource interface{}) (interface{}, error) { resourceBytes, err := json.Marshal(resource) if err != nil { - return nil, errors.Wrap(err, "failed to marshal the aws lamda trigger resource") + return nil, fmt.Errorf("failed to marshal the aws lambda trigger resource, %w", err) } parameters := t.Trigger.Template.AWSLambda.Parameters if parameters != nil { @@ -90,7 +91,7 @@ func (t *AWSLambdaTrigger) ApplyResourceParameters(events map[string]*v1alpha1.E } var ht *v1alpha1.AWSLambdaTrigger if err := json.Unmarshal(updatedResourceBytes, &ht); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal the updated aws lambda trigger resource after applying resource parameters") + return nil, fmt.Errorf("failed to unmarshal the updated aws lambda trigger resource after applying resource parameters, %w", err) } return ht, nil } @@ -101,11 +102,11 @@ func (t *AWSLambdaTrigger) ApplyResourceParameters(events map[string]*v1alpha1.E func (t *AWSLambdaTrigger) Execute(ctx context.Context, events map[string]*v1alpha1.Event, resource interface{}) (interface{}, error) { trigger, ok := resource.(*v1alpha1.AWSLambdaTrigger) if !ok { - return nil, errors.New("failed to interpret the trigger resource") + return nil, fmt.Errorf("failed to interpret the trigger resource") } if trigger.Payload == nil { - return nil, errors.New("payload parameters are not specified") + return nil, fmt.Errorf("payload parameters are not specified") } payload, err := triggers.ConstructPayload(events, trigger.Payload) @@ -133,7 +134,7 @@ func (t *AWSLambdaTrigger) ApplyPolicy(ctx context.Context, resource interface{} obj, ok := resource.(*lambda.InvokeOutput) if !ok { - return errors.New("failed to interpret the trigger resource") + return fmt.Errorf("failed to interpret the trigger resource") } p := policy.NewStatusPolicy(int(*obj.StatusCode), t.Trigger.Policy.Status.GetAllow()) diff --git a/sensors/triggers/aws-lambda/aws-lambda_test.go b/sensors/triggers/aws-lambda/aws-lambda_test.go index 4e224ef0ac..543f9df3d0 100644 --- a/sensors/triggers/aws-lambda/aws-lambda_test.go +++ b/sensors/triggers/aws-lambda/aws-lambda_test.go @@ -29,7 +29,7 @@ import ( "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" ) -var sensorObj = &v1alpha1.Sensor{ +var sensorObjSparse = &v1alpha1.Sensor{ ObjectMeta: metav1.ObjectMeta{ Name: "fake-sensor", Namespace: "fake", @@ -38,7 +38,27 @@ var sensorObj = &v1alpha1.Sensor{ Triggers: []v1alpha1.Trigger{ { Template: &v1alpha1.TriggerTemplate{ - Name: "fake-trigger", + Name: "fake-trigger-sparse", + AWSLambda: &v1alpha1.AWSLambdaTrigger{ + FunctionName: "fake-function", + Region: "us-east", + }, + }, + }, + }, + }, +} + +var sensorObjFull = &v1alpha1.Sensor{ + ObjectMeta: metav1.ObjectMeta{ + Name: "fake-sensor", + Namespace: "fake", + }, + Spec: v1alpha1.SensorSpec{ + Triggers: []v1alpha1.Trigger{ + { + Template: &v1alpha1.TriggerTemplate{ + Name: "fake-trigger-full", AWSLambda: &v1alpha1.AWSLambdaTrigger{ FunctionName: "fake-function", AccessKey: &corev1.SecretKeySelector{ @@ -53,7 +73,17 @@ var sensorObj = &v1alpha1.Sensor{ }, Key: "secretkey", }, - Region: "us-east", + Region: "us-east", + RoleARN: "arn:aws:iam::123456789012:role/fake-trigger-full", + Payload: []v1alpha1.TriggerParameter{ + { + Src: &v1alpha1.TriggerParameterSource{ + DependencyName: "fake-dependency", + Value: new(string), + }, + Dest: "metadata.label.value", + }, + }, }, }, }, @@ -61,84 +91,98 @@ var sensorObj = &v1alpha1.Sensor{ }, } -func 
getAWSTrigger() *AWSLambdaTrigger { - return &AWSLambdaTrigger{ - LambdaClient: nil, - Sensor: sensorObj.DeepCopy(), - Trigger: &sensorObj.Spec.Triggers[0], - Logger: logging.NewArgoEventsLogger(), +func getAWSTriggers() []AWSLambdaTrigger { + return []AWSLambdaTrigger{ + { + LambdaClient: nil, + Sensor: sensorObjSparse.DeepCopy(), + Trigger: &sensorObjSparse.Spec.Triggers[0], + Logger: logging.NewArgoEventsLogger(), + }, + { + LambdaClient: nil, + Sensor: sensorObjFull.DeepCopy(), + Trigger: &sensorObjFull.Spec.Triggers[0], + Logger: logging.NewArgoEventsLogger(), + }, } } func TestAWSLambdaTrigger_FetchResource(t *testing.T) { - trigger := getAWSTrigger() - resource, err := trigger.FetchResource(context.TODO()) - assert.Nil(t, err) - assert.NotNil(t, resource) - - at, ok := resource.(*v1alpha1.AWSLambdaTrigger) - assert.Nil(t, err) - assert.Equal(t, true, ok) - assert.Equal(t, "fake-function", at.FunctionName) + triggers := getAWSTriggers() + for _, trigger := range triggers { + resource, err := trigger.FetchResource(context.TODO()) + assert.Nil(t, err) + assert.NotNil(t, resource) + + at, ok := resource.(*v1alpha1.AWSLambdaTrigger) + assert.Nil(t, err) + assert.Equal(t, true, ok) + assert.Equal(t, "fake-function", at.FunctionName) + } } func TestAWSLambdaTrigger_ApplyResourceParameters(t *testing.T) { - trigger := getAWSTrigger() - testEvents := map[string]*v1alpha1.Event{ - "fake-dependency": { - Context: &v1alpha1.EventContext{ - ID: "1", - Type: "webhook", - Source: "webhook-gateway", - DataContentType: "application/json", - SpecVersion: cloudevents.VersionV1, - Subject: "example-1", + triggers := getAWSTriggers() + for _, trigger := range triggers { + testEvents := map[string]*v1alpha1.Event{ + "fake-dependency": { + Context: &v1alpha1.EventContext{ + ID: "1", + Type: "webhook", + Source: "webhook-gateway", + DataContentType: "application/json", + SpecVersion: cloudevents.VersionV1, + Subject: "example-1", + }, + Data: []byte(`{"function": "real-function"}`), }, - Data: []byte(`{"function": "real-function"}`), - }, - } + } - defaultValue := "default" - defaultRegion := "region" + defaultValue := "default" + defaultRegion := "region" - trigger.Trigger.Template.AWSLambda.Parameters = []v1alpha1.TriggerParameter{ - { - Src: &v1alpha1.TriggerParameterSource{ - DependencyName: "fake-dependency", - DataKey: "function", - Value: &defaultValue, + trigger.Trigger.Template.AWSLambda.Parameters = []v1alpha1.TriggerParameter{ + { + Src: &v1alpha1.TriggerParameterSource{ + DependencyName: "fake-dependency", + DataKey: "function", + Value: &defaultValue, + }, + Dest: "functionName", }, - Dest: "functionName", - }, - { - Src: &v1alpha1.TriggerParameterSource{ - DependencyName: "fake-dependency", - DataKey: "region", - Value: &defaultRegion, + { + Src: &v1alpha1.TriggerParameterSource{ + DependencyName: "fake-dependency", + DataKey: "region", + Value: &defaultRegion, + }, + Dest: "region", }, - Dest: "region", - }, - } + } - response, err := trigger.ApplyResourceParameters(testEvents, trigger.Trigger.Template.AWSLambda) - assert.Nil(t, err) - assert.NotNil(t, response) + response, err := trigger.ApplyResourceParameters(testEvents, trigger.Trigger.Template.AWSLambda) + assert.Nil(t, err) + assert.NotNil(t, response) - updatedObj, ok := response.(*v1alpha1.AWSLambdaTrigger) - assert.Equal(t, true, ok) - assert.Equal(t, "real-function", updatedObj.FunctionName) - assert.Equal(t, "region", updatedObj.Region) + updatedObj, ok := response.(*v1alpha1.AWSLambdaTrigger) + assert.Equal(t, true, ok) + 
assert.Equal(t, "real-function", updatedObj.FunctionName) + assert.Equal(t, "region", updatedObj.Region) + } } func TestAWSLambdaTrigger_ApplyPolicy(t *testing.T) { - trigger := getAWSTrigger() - status := int64(200) - response := &lambda.InvokeOutput{ - StatusCode: &status, - } - trigger.Trigger.Policy = &v1alpha1.TriggerPolicy{ - Status: &v1alpha1.StatusPolicy{Allow: []int32{200, 300}}, + triggers := getAWSTriggers() + for _, trigger := range triggers { + status := int64(200) + response := &lambda.InvokeOutput{ + StatusCode: &status, + } + trigger.Trigger.Policy = &v1alpha1.TriggerPolicy{ + Status: &v1alpha1.StatusPolicy{Allow: []int32{200, 300}}, + } + err := trigger.ApplyPolicy(context.TODO(), response) + assert.Nil(t, err) } - err := trigger.ApplyPolicy(context.TODO(), response) - assert.Nil(t, err) } diff --git a/sensors/triggers/azure-event-hubs/azure_event_hubs.go b/sensors/triggers/azure-event-hubs/azure_event_hubs.go index edbfc0b444..879f678201 100644 --- a/sensors/triggers/azure-event-hubs/azure_event_hubs.go +++ b/sensors/triggers/azure-event-hubs/azure_event_hubs.go @@ -20,10 +20,8 @@ import ( "encoding/json" "fmt" - "github.com/pkg/errors" - "go.uber.org/zap" - eventhub "github.com/Azure/azure-event-hubs-go/v3" + "go.uber.org/zap" "github.com/argoproj/argo-events/common" "github.com/argoproj/argo-events/common/logging" @@ -45,10 +43,10 @@ type AzureEventHubsTrigger struct { } // NewAzureEventHubsTrigger returns a new azure event hubs context. -func NewAzureEventHubsTrigger(sensor *v1alpha1.Sensor, trigger *v1alpha1.Trigger, azureEventHubsClient map[string]*eventhub.Hub, logger *zap.SugaredLogger) (*AzureEventHubsTrigger, error) { +func NewAzureEventHubsTrigger(sensor *v1alpha1.Sensor, trigger *v1alpha1.Trigger, azureEventHubsClient common.StringKeyedMap[*eventhub.Hub], logger *zap.SugaredLogger) (*AzureEventHubsTrigger, error) { azureEventHubsTrigger := trigger.Template.AzureEventHubs - hub, ok := azureEventHubsClient[trigger.Template.Name] + hub, ok := azureEventHubsClient.Load(trigger.Template.Name) if !ok { // form event hubs connection string in the ff format: @@ -74,7 +72,7 @@ func NewAzureEventHubsTrigger(sensor *v1alpha1.Sensor, trigger *v1alpha1.Trigger return nil, err } - azureEventHubsClient[trigger.Template.Name] = hub + azureEventHubsClient.Store(trigger.Template.Name, hub) } return &AzureEventHubsTrigger{ @@ -100,12 +98,12 @@ func (t *AzureEventHubsTrigger) FetchResource(ctx context.Context) (interface{}, func (t *AzureEventHubsTrigger) ApplyResourceParameters(events map[string]*v1alpha1.Event, resource interface{}) (interface{}, error) { fetchedResource, ok := resource.(*v1alpha1.AzureEventHubsTrigger) if !ok { - return nil, errors.New("failed to interpret the fetched trigger resource") + return nil, fmt.Errorf("failed to interpret the fetched trigger resource") } resourceBytes, err := json.Marshal(fetchedResource) if err != nil { - return nil, errors.Wrap(err, "failed to marshal the azure event hubs trigger resource") + return nil, fmt.Errorf("failed to marshal the azure event hubs trigger resource, %w", err) } parameters := fetchedResource.Parameters if parameters != nil { @@ -115,7 +113,7 @@ func (t *AzureEventHubsTrigger) ApplyResourceParameters(events map[string]*v1alp } var ht *v1alpha1.AzureEventHubsTrigger if err := json.Unmarshal(updatedResourceBytes, &ht); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal the updated azure event hubs trigger resource after applying resource parameters") + return nil, fmt.Errorf("failed to unmarshal the 
updated azure event hubs trigger resource after applying resource parameters, %w", err) } return ht, nil } @@ -126,11 +124,11 @@ func (t *AzureEventHubsTrigger) ApplyResourceParameters(events map[string]*v1alp func (t *AzureEventHubsTrigger) Execute(ctx context.Context, events map[string]*v1alpha1.Event, resource interface{}) (interface{}, error) { trigger, ok := resource.(*v1alpha1.AzureEventHubsTrigger) if !ok { - return nil, errors.New("failed to interpret the trigger resource") + return nil, fmt.Errorf("failed to interpret the trigger resource") } if trigger.Payload == nil { - return nil, errors.New("payload parameters are not specified") + return nil, fmt.Errorf("payload parameters are not specified") } payload, err := triggers.ConstructPayload(events, trigger.Payload) diff --git a/sensors/triggers/azure-service-bus/azure_service_bus.go b/sensors/triggers/azure-service-bus/azure_service_bus.go new file mode 100644 index 0000000000..ba145a42f4 --- /dev/null +++ b/sensors/triggers/azure-service-bus/azure_service_bus.go @@ -0,0 +1,175 @@ +/* +Copyright 2020 BlackRock, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package azureservicebus + +import ( + "context" + "encoding/json" + "fmt" + + servicebus "github.com/Azure/azure-sdk-for-go/sdk/messaging/azservicebus" + "go.uber.org/zap" + + "github.com/argoproj/argo-events/common" + "github.com/argoproj/argo-events/common/logging" + apicommon "github.com/argoproj/argo-events/pkg/apis/common" + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" + "github.com/argoproj/argo-events/sensors/triggers" +) + +// AzureServiceBusTrigger describes the trigger to send messages to a Service Bus +type AzureServiceBusTrigger struct { + // Sensor object + Sensor *v1alpha1.Sensor + // Trigger reference + Trigger *v1alpha1.Trigger + // Sender refers to Azure Service Bus Sender struct + Sender *servicebus.Sender + // Logger to log stuff + Logger *zap.SugaredLogger +} + +func NewAzureServiceBusTrigger(sensor *v1alpha1.Sensor, trigger *v1alpha1.Trigger, azureServiceBusClients common.StringKeyedMap[*servicebus.Sender], logger *zap.SugaredLogger) (*AzureServiceBusTrigger, error) { + triggerLogger := logger.With(logging.LabelTriggerType, apicommon.AzureServiceBusTrigger) + azureServiceBusTrigger := trigger.Template.AzureServiceBus + + sender, ok := azureServiceBusClients.Load(trigger.Template.Name) + + if !ok { + connStr, err := common.GetSecretFromVolume(azureServiceBusTrigger.ConnectionString) + if err != nil { + triggerLogger.With("connection-string", azureServiceBusTrigger.ConnectionString.Name).Errorw("failed to retrieve connection string from secret", zap.Error(err)) + return nil, err + } + + triggerLogger.Info("connecting to the service bus...") + clientOptions := servicebus.ClientOptions{} + if azureServiceBusTrigger.TLS != nil { + tlsConfig, err := common.GetTLSConfig(azureServiceBusTrigger.TLS) + if err != nil { + triggerLogger.Errorw("failed to get the tls configuration", zap.Error(err)) + return nil, err + } + clientOptions.TLSConfig = tlsConfig + } + + 
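Every trigger constructor converted in this patch (Lambda above, Event Hubs, and this Service Bus one) now follows the same load-or-create flow against common.StringKeyedMap instead of indexing a plain map. A rough standalone sketch of that flow, with a locally defined stand-in for the map type (the real implementation lives in argo-events' common package and is only assumed here to expose Load and Store as the diff uses them):

package main

import (
	"fmt"
	"sync"
)

// keyedMap is a stand-in for common.StringKeyedMap: a mutex-guarded
// map keyed by trigger template name.
type keyedMap[T any] struct {
	mu sync.Mutex
	m  map[string]T
}

func (c *keyedMap[T]) Load(k string) (T, bool) {
	c.mu.Lock()
	defer c.mu.Unlock()
	v, ok := c.m[k]
	return v, ok
}

func (c *keyedMap[T]) Store(k string, v T) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.m[k] = v
}

// getOrCreate mirrors the constructors in this patch: reuse a cached
// client if one exists, otherwise build it once and cache it.
func getOrCreate(clients *keyedMap[string], name string) string {
	client, ok := clients.Load(name)
	if !ok {
		client = "client-for-" + name // stands in for the expensive dial
		clients.Store(name, client)
	}
	return client
}

func main() {
	clients := &keyedMap[string]{m: map[string]string{}}
	fmt.Println(getOrCreate(clients, "fake-trigger"))
	fmt.Println(getOrCreate(clients, "fake-trigger")) // second call is served from the cache
}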
client, err := servicebus.NewClientFromConnectionString(connStr, &clientOptions) + if err != nil { + triggerLogger.Errorw("failed to create a service bus client", zap.Error(err)) + return nil, err + } + + // Set queueOrTopicName to be azureServiceBusTrigger.QueueName or azureServiceBusTrigger.TopicName + var queueOrTopicName string + switch { + case azureServiceBusTrigger.QueueName != "": + queueOrTopicName = azureServiceBusTrigger.QueueName + case azureServiceBusTrigger.TopicName != "": + queueOrTopicName = azureServiceBusTrigger.TopicName + default: + return nil, fmt.Errorf("neither queue name nor topic name is specified") + } + + logger.With("queueOrTopicName", queueOrTopicName).Info("creating a new sender...") + + sender, err = client.NewSender(queueOrTopicName, &servicebus.NewSenderOptions{}) + if err != nil { + triggerLogger.Errorw("failed to create a service bus sender", zap.Error(err)) + return nil, err + } + + azureServiceBusClients.Store(trigger.Template.Name, sender) + } + + return &AzureServiceBusTrigger{ + Sensor: sensor, + Trigger: trigger, + Sender: sender, + Logger: triggerLogger, + }, nil +} + +// GetTriggerType returns the type of the trigger +func (t *AzureServiceBusTrigger) GetTriggerType() apicommon.TriggerType { + return apicommon.AzureServiceBusTrigger +} + +// FetchResource fetches the trigger resource +func (t *AzureServiceBusTrigger) FetchResource(ctx context.Context) (interface{}, error) { + return t.Trigger.Template.AzureServiceBus, nil +} + +// ApplyResourceParameters applies parameters to the trigger resource +func (t *AzureServiceBusTrigger) ApplyResourceParameters(events map[string]*v1alpha1.Event, resource interface{}) (interface{}, error) { + fetchedResource, ok := resource.(*v1alpha1.AzureServiceBusTrigger) + if !ok { + return nil, fmt.Errorf("failed to interpret the fetched trigger resource") + } + + resourceBytes, err := json.Marshal(fetchedResource) + if err != nil { + return nil, fmt.Errorf("failed to marshal the azure service bus trigger resource, %w", err) + } + + parameters := fetchedResource.Parameters + if parameters != nil { + updatedResourceBytes, err := triggers.ApplyParams(resourceBytes, parameters, events) + if err != nil { + return nil, err + } + var sbTrigger *v1alpha1.AzureServiceBusTrigger + if err := json.Unmarshal(updatedResourceBytes, &sbTrigger); err != nil { + return nil, fmt.Errorf("failed to unmarshal the updated azure service bus trigger resource after applying resource parameters, %w", err) + } + return sbTrigger, nil + } + + return resource, nil +} + +// Execute executes the trigger +func (t *AzureServiceBusTrigger) Execute(ctx context.Context, events map[string]*v1alpha1.Event, resource interface{}) (interface{}, error) { + trigger, ok := resource.(*v1alpha1.AzureServiceBusTrigger) + if !ok { + return nil, fmt.Errorf("failed to interpret the trigger resource") + } + + if trigger.Payload == nil { + return nil, fmt.Errorf("payload parameters are not specified") + } + + payload, err := triggers.ConstructPayload(events, trigger.Payload) + if err != nil { + return nil, err + } + + message := &servicebus.Message{ + Body: payload, + } + + err = t.Sender.SendMessage(ctx, message, &servicebus.SendMessageOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to send a message to the service bus, %w", err) + } + t.Logger.Info("successfully sent message to the service bus") + + return nil, nil +} + +// ApplyPolicy applies the trigger policy +func (t *AzureServiceBusTrigger) ApplyPolicy(ctx context.Context, resource interface{}) 
error { + return nil +} diff --git a/sensors/triggers/custom-trigger/custom-trigger.go b/sensors/triggers/custom-trigger/custom-trigger.go index fa1c809f9e..71147377a7 100644 --- a/sensors/triggers/custom-trigger/custom-trigger.go +++ b/sensors/triggers/custom-trigger/custom-trigger.go @@ -18,13 +18,14 @@ package customtrigger import ( "context" "encoding/json" + "fmt" "github.com/ghodss/yaml" - "github.com/pkg/errors" "go.uber.org/zap" "google.golang.org/grpc" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" "k8s.io/apimachinery/pkg/util/wait" "github.com/argoproj/argo-events/common" @@ -47,7 +48,7 @@ type CustomTrigger struct { } // NewCustomTrigger returns a new custom trigger -func NewCustomTrigger(sensor *v1alpha1.Sensor, trigger *v1alpha1.Trigger, logger *zap.SugaredLogger, customTriggerClients map[string]*grpc.ClientConn) (*CustomTrigger, error) { +func NewCustomTrigger(sensor *v1alpha1.Sensor, trigger *v1alpha1.Trigger, logger *zap.SugaredLogger, customTriggerClients common.StringKeyedMap[*grpc.ClientConn]) (*CustomTrigger, error) { customTrigger := &CustomTrigger{ Sensor: sensor, Trigger: trigger, @@ -56,7 +57,7 @@ func NewCustomTrigger(sensor *v1alpha1.Sensor, trigger *v1alpha1.Trigger, logger ct := trigger.Template.CustomTrigger - if conn, ok := customTriggerClients[trigger.Template.Name]; ok { + if conn, ok := customTriggerClients.Load(trigger.Template.Name); ok { if conn.GetState() == connectivity.Ready { logger.Info("trigger client connection is ready...") customTrigger.triggerClient = triggers.NewTriggerClient(conn) @@ -64,14 +65,14 @@ func NewCustomTrigger(sensor *v1alpha1.Sensor, trigger *v1alpha1.Trigger, logger } logger.Info("trigger client connection is closed, creating new one...") - delete(customTriggerClients, trigger.Template.Name) + customTriggerClients.Delete(trigger.Template.Name) } logger.Infow("instantiating trigger client...", zap.Any("server-url", ct.ServerURL)) opt := []grpc.DialOption{ grpc.WithBlock(), - grpc.WithInsecure(), + grpc.WithTransportCredentials(insecure.NewCredentials()), } if ct.Secure { @@ -83,11 +84,8 @@ func NewCustomTrigger(sensor *v1alpha1.Sensor, trigger *v1alpha1.Trigger, logger if err != nil { return nil, err } - case ct.DeprecatedCertFilePath != "": - // DEPRECATED - certFilePath = ct.DeprecatedCertFilePath default: - return nil, errors.New("invalid config, CERT secret not defined") + return nil, fmt.Errorf("invalid config, CERT secret not defined") } creds, err := credentials.NewClientTLSFromFile(certFilePath, ct.ServerNameOverride) if err != nil { @@ -119,7 +117,7 @@ func NewCustomTrigger(sensor *v1alpha1.Sensor, trigger *v1alpha1.Trigger, logger } customTrigger.triggerClient = triggers.NewTriggerClient(conn) - customTriggerClients[trigger.Template.Name] = conn + customTriggerClients.Store(trigger.Template.Name, conn) logger.Info("successfully setup the trigger client...") return customTrigger, nil @@ -134,7 +132,7 @@ func (ct *CustomTrigger) GetTriggerType() apicommon.TriggerType { func (ct *CustomTrigger) FetchResource(ctx context.Context) (interface{}, error) { specBody, err := yaml.Marshal(ct.Trigger.Template.CustomTrigger.Spec) if err != nil { - return nil, errors.Wrap(err, "failed to parse the custom trigger spec body") + return nil, fmt.Errorf("failed to parse the custom trigger spec body, %w", err) } ct.Logger.Debugw("trigger spec body", zap.Any("spec", string(specBody))) @@ -143,7 +141,7 @@ func (ct *CustomTrigger) FetchResource(ctx context.Context) 
(interface{}, error) Resource: specBody, }) if err != nil { - return nil, errors.Wrapf(err, "failed to fetch the custom trigger resource for %s", ct.Trigger.Template.Name) + return nil, fmt.Errorf("failed to fetch the custom trigger resource for %s, %w", ct.Trigger.Template.Name, err) } ct.Logger.Debugw("fetched resource", zap.Any("resource", string(resource.Resource))) @@ -154,7 +152,7 @@ func (ct *CustomTrigger) FetchResource(ctx context.Context) (interface{}, error) func (ct *CustomTrigger) ApplyResourceParameters(events map[string]*v1alpha1.Event, resource interface{}) (interface{}, error) { obj, ok := resource.([]byte) if !ok { - return nil, errors.New("failed to interpret the trigger resource for resource parameters application") + return nil, fmt.Errorf("failed to interpret the trigger resource for resource parameters application") } parameters := ct.Trigger.Template.CustomTrigger.Parameters @@ -162,12 +160,12 @@ func (ct *CustomTrigger) ApplyResourceParameters(events map[string]*v1alpha1.Eve // only JSON formatted resource body is eligible for parameters var temp map[string]interface{} if err := json.Unmarshal(obj, &temp); err != nil { - return nil, errors.Wrapf(err, "fetched resource body is not valid JSON for trigger %s", ct.Trigger.Template.Name) + return nil, fmt.Errorf("fetched resource body is not valid JSON for trigger %s, %w", ct.Trigger.Template.Name, err) } result, err := triggers.ApplyParams(obj, ct.Trigger.Template.CustomTrigger.Parameters, events) if err != nil { - return nil, errors.Wrapf(err, "failed to apply the parameters to the custom trigger resource for %s", ct.Trigger.Template.Name) + return nil, fmt.Errorf("failed to apply the parameters to the custom trigger resource for %s, %w", ct.Trigger.Template.Name, err) } ct.Logger.Debugw("resource after parameterization", zap.Any("resource", string(result))) @@ -181,7 +179,7 @@ func (ct *CustomTrigger) ApplyResourceParameters(events map[string]*v1alpha1.Eve func (ct *CustomTrigger) Execute(ctx context.Context, events map[string]*v1alpha1.Event, resource interface{}) (interface{}, error) { obj, ok := resource.([]byte) if !ok { - return nil, errors.New("failed to interpret the trigger resource for the execution") + return nil, fmt.Errorf("failed to interpret the trigger resource for the execution") } ct.Logger.Debugw("resource to execute", zap.Any("resource", string(obj))) @@ -205,7 +203,7 @@ func (ct *CustomTrigger) Execute(ctx context.Context, events map[string]*v1alpha Payload: payload, }) if err != nil { - return nil, errors.Wrapf(err, "failed to execute the custom trigger resource for %s", ct.Trigger.Template.Name) + return nil, fmt.Errorf("failed to execute the custom trigger resource for %s, %w", ct.Trigger.Template.Name, err) } ct.Logger.Debugw("trigger execution response", zap.Any("response", string(result.Response))) @@ -216,7 +214,7 @@ func (ct *CustomTrigger) Execute(ctx context.Context, events map[string]*v1alpha func (ct *CustomTrigger) ApplyPolicy(ctx context.Context, resource interface{}) error { obj, ok := resource.([]byte) if !ok { - return errors.New("failed to interpret the trigger resource for the policy application") + return fmt.Errorf("failed to interpret the trigger resource for the policy application") } ct.Logger.Debugw("resource to apply policy on", zap.Any("resource", string(obj))) @@ -225,7 +223,7 @@ func (ct *CustomTrigger) ApplyPolicy(ctx context.Context, resource interface{}) Request: obj, }) if err != nil { - return errors.Wrapf(err, "failed to apply the policy for the custom trigger 
resource for %s", ct.Trigger.Template.Name) + return fmt.Errorf("failed to apply the policy for the custom trigger resource for %s, %w", ct.Trigger.Template.Name, err) } ct.Logger.Infow("policy application result", zap.Any("success", result.Success), zap.Any("message", result.Message)) return err diff --git a/sensors/triggers/email/email.go b/sensors/triggers/email/email.go new file mode 100644 index 0000000000..21f6426788 --- /dev/null +++ b/sensors/triggers/email/email.go @@ -0,0 +1,165 @@ +/* +Copyright 2020 BlackRock, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package email + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "regexp" + + notifications "github.com/argoproj/notifications-engine/pkg/services" + "go.uber.org/zap" + + "github.com/argoproj/argo-events/common" + "github.com/argoproj/argo-events/common/logging" + apicommon "github.com/argoproj/argo-events/pkg/apis/common" + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" + "github.com/argoproj/argo-events/sensors/triggers" +) + +type EmailTrigger struct { + // Sensor refer to the sensor object + Sensor *v1alpha1.Sensor + // Trigger refers to the trigger resource + Trigger *v1alpha1.Trigger + // Logger to log stuff + Logger *zap.SugaredLogger + // emailSvc refers to the Email notification service. 
+ emailSvc notifications.NotificationService +} + +// NewEmailTrigger returns a new Email trigger context +func NewEmailTrigger(sensor *v1alpha1.Sensor, trigger *v1alpha1.Trigger, logger *zap.SugaredLogger) (*EmailTrigger, error) { + emailTrigger := trigger.Template.Email + var smtpPassword string + if emailTrigger.SMTPPassword != nil { + var err error + smtpPassword, err = common.GetSecretFromVolume(emailTrigger.SMTPPassword) + if err != nil { + return nil, fmt.Errorf("failed to retrieve the smtp password, %w", err) + } + } + emailSvc := notifications.NewEmailService( + notifications.EmailOptions{ + Host: emailTrigger.Host, + Port: int(emailTrigger.Port), + Username: emailTrigger.Username, + Password: smtpPassword, + From: emailTrigger.From, + }, + ) + return &EmailTrigger{ + Sensor: sensor, + Trigger: trigger, + Logger: logger.With(logging.LabelTriggerType, apicommon.EmailTrigger), + emailSvc: emailSvc, + }, nil +} + +// GetTriggerType returns the type of the trigger +func (t *EmailTrigger) GetTriggerType() apicommon.TriggerType { + return apicommon.EmailTrigger +} + +func (t *EmailTrigger) FetchResource(ctx context.Context) (interface{}, error) { + return t.Trigger.Template.Email, nil +} + +func (t *EmailTrigger) ApplyResourceParameters(events map[string]*v1alpha1.Event, resource interface{}) (interface{}, error) { + resourceBytes, err := json.Marshal(resource) + if err != nil { + return nil, fmt.Errorf("failed to marshal the Email trigger resource, %w", err) + } + parameters := t.Trigger.Template.Email.Parameters + + if parameters != nil { + updatedResourceBytes, err := triggers.ApplyParams(resourceBytes, t.Trigger.Template.Email.Parameters, events) + if err != nil { + return nil, err + } + + var st *v1alpha1.EmailTrigger + if err := json.Unmarshal(updatedResourceBytes, &st); err != nil { + return nil, fmt.Errorf("failed to unmarshal the updated Email trigger resource after applying resource parameters, %w", err) + } + + return st, nil + } + + return resource, nil +} + +// Execute executes the trigger +func (t *EmailTrigger) Execute(ctx context.Context, events map[string]*v1alpha1.Event, resource interface{}) (interface{}, error) { + t.Logger.Info("executing EmailTrigger") + _, ok := resource.(*v1alpha1.EmailTrigger) + if !ok { + return nil, fmt.Errorf("failed to interpret the Email trigger resource") + } + + emailTrigger := t.Trigger.Template.Email + + if len(emailTrigger.To) == 0 { + return nil, fmt.Errorf("to can't be empty") + } + body := emailTrigger.Body + if body == "" { + return nil, fmt.Errorf("body can't be empty") + } + subject := emailTrigger.Subject + if subject == "" { + return nil, fmt.Errorf("subject can't be empty") + } + t.Logger.Infow("sending emails...", zap.Any("to", emailTrigger.To)) + var errs error + validEmail := regexp.MustCompile(`^[\w-\.]+@([\w-]+\.)+[\w-]{2,4}$`) + for _, addr := range emailTrigger.To { + if !validEmail.MatchString(addr) { + t.Logger.Errorw("invalid emailId", zap.Any("to", addr)) + errs = errors.Join(errs, fmt.Errorf("invalid to emailId %v", addr)) + continue + } + notification := notifications.Notification{ + Message: emailTrigger.Body, + Email: &notifications.EmailNotification{ + Subject: emailTrigger.Subject, + Body: emailTrigger.Body, + }, + } + destination := notifications.Destination{ + Service: "email", + Recipient: addr, + } + err := t.emailSvc.Send(notification, destination) + if err != nil { + t.Logger.Errorw("unable to send emails to emailId", zap.Any("to", addr), zap.Error(err)) + errs = errors.Join(errs, fmt.Errorf("failed 
to send emails to emailId %v, %w", addr, err)) + } + } + if errs != nil { + return nil, errs + } + t.Logger.Infow("message successfully sent to emailIds", zap.Any("message", emailTrigger.Body), zap.Any("to", emailTrigger.To)) + t.Logger.Info("finished executing EmailTrigger") + return nil, nil +} + +// No Policies for EmailTrigger +func (t *EmailTrigger) ApplyPolicy(ctx context.Context, resource interface{}) error { + return nil +} diff --git a/sensors/triggers/email/email_test.go b/sensors/triggers/email/email_test.go new file mode 100644 index 0000000000..a307b44938 --- /dev/null +++ b/sensors/triggers/email/email_test.go @@ -0,0 +1,201 @@ +/* +Copyright 2020 BlackRock, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package email + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/argoproj/argo-events/common/logging" + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" + "github.com/argoproj/notifications-engine/pkg/services" +) + +var sensorObj = &v1alpha1.Sensor{ + ObjectMeta: metav1.ObjectMeta{ + Name: "fake-sensor", + Namespace: "fake", + }, + Spec: v1alpha1.SensorSpec{ + Triggers: []v1alpha1.Trigger{ + { + Template: &v1alpha1.TriggerTemplate{ + Name: "fake-trigger", + Email: &v1alpha1.EmailTrigger{ + SMTPPassword: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "secret", + }, + Key: "password", + }, + Host: "fake-host", + Port: 468, + Username: "fake-username", + To: []string{"fake1@email.com", "fake2@email.com"}, + From: "fake-email", + Subject: "fake-subject", + Body: "fake-body", + }, + }, + }, + }, + }, +} + +func getEmailTrigger(n services.NotificationService) *EmailTrigger { + return &EmailTrigger{ + Sensor: sensorObj.DeepCopy(), + Trigger: sensorObj.Spec.Triggers[0].DeepCopy(), + Logger: logging.NewArgoEventsLogger(), + emailSvc: n, + } +} + +func TestEmailTrigger_FetchResource(t *testing.T) { + trigger := getEmailTrigger(nil) + resource, err := trigger.FetchResource(context.TODO()) + assert.Nil(t, err) + assert.NotNil(t, resource) + + ot, ok := resource.(*v1alpha1.EmailTrigger) + assert.Equal(t, true, ok) + assert.Equal(t, "fake-host", ot.Host) + assert.Equal(t, int32(468), ot.Port) + assert.Equal(t, "fake-username", ot.Username) + assert.Equal(t, []string{"fake1@email.com", "fake2@email.com"}, ot.To) + assert.Equal(t, "fake-email", ot.From) + assert.Equal(t, "fake-subject", ot.Subject) + assert.Equal(t, "fake-body", ot.Body) +} + +func TestEmailTrigger_ApplyResourceParameters(t *testing.T) { + trigger := getEmailTrigger(nil) + + testEvents := map[string]*v1alpha1.Event{ + "fake-dependency": { + Context: &v1alpha1.EventContext{ + ID: "1", + Type: "webhook", + Source: "webhook-gateway", + DataContentType: "application/json", + SpecVersion: "1.0", + Subject: "example-1", + }, + Data: []byte(`{"to": "real@email.com", "name": "Luke"}`), + }, + } + + trigger.Trigger.Template.Email.Parameters = 
[]v1alpha1.TriggerParameter{ + { + Src: &v1alpha1.TriggerParameterSource{ + DependencyName: "fake-dependency", + DataKey: "to", + }, + Dest: "to.0", + }, + { + Src: &v1alpha1.TriggerParameterSource{ + DependencyName: "fake-dependency", + DataKey: "body", + DataTemplate: "Hi {{.Input.name}},\n\tHello There.\nThanks,\nObi", + }, + Dest: "body", + }, + } + + resource, err := trigger.ApplyResourceParameters(testEvents, trigger.Trigger.Template.Email) + assert.Nil(t, err) + assert.NotNil(t, resource) + + ot, ok := resource.(*v1alpha1.EmailTrigger) + assert.Equal(t, true, ok) + assert.Equal(t, "fake-host", ot.Host) + assert.Equal(t, int32(468), ot.Port) + assert.Equal(t, "fake-username", ot.Username) + assert.Equal(t, []string{"real@email.com", "fake2@email.com"}, ot.To) + assert.Equal(t, "fake-email", ot.From) + assert.Equal(t, "fake-subject", ot.Subject) + assert.Equal(t, "Hi Luke,\n\tHello There.\nThanks,\nObi", ot.Body) +} + +// Mock Notification Service that returns an error on Send +type MockNotificationServiceError struct{} + +// Mocks a send error +func (m *MockNotificationServiceError) Send(n services.Notification, d services.Destination) error { + return errors.New("") +} + +// Mock Notification Service that returns nil on Send +type MockNotificationService struct{} + +// Mocks a successful send +func (m *MockNotificationService) Send(n services.Notification, d services.Destination) error { + return nil +} + +func TestEmailTrigger_Execute(t *testing.T) { + t.Run("Unmarshallable resource", func(t *testing.T) { + trigger := getEmailTrigger(&MockNotificationService{}) + _, err := trigger.Execute(context.TODO(), map[string]*v1alpha1.Event{}, nil) + assert.NotNil(t, err) + }) + + t.Run("Empty to scenario", func(t *testing.T) { + trigger := getEmailTrigger(&MockNotificationService{}) + trigger.Trigger.Template.Email.To = make([]string, 0) + _, err := trigger.Execute(context.TODO(), map[string]*v1alpha1.Event{}, trigger.Trigger.Template.Email) + assert.NotNil(t, err) + }) + + t.Run("Invalid to scenario", func(t *testing.T) { + trigger := getEmailTrigger(&MockNotificationService{}) + trigger.Trigger.Template.Email.To = []string{"not@a@valid.email"} + _, err := trigger.Execute(context.TODO(), map[string]*v1alpha1.Event{}, trigger.Trigger.Template.Email) + assert.NotNil(t, err) + }) + + t.Run("Empty subject scenario", func(t *testing.T) { + trigger := getEmailTrigger(&MockNotificationService{}) + trigger.Trigger.Template.Email.Subject = "" + _, err := trigger.Execute(context.TODO(), map[string]*v1alpha1.Event{}, trigger.Trigger.Template.Email) + assert.NotNil(t, err) + }) + + t.Run("Empty body scenario", func(t *testing.T) { + trigger := getEmailTrigger(&MockNotificationService{}) + trigger.Trigger.Template.Email.Body = "" + _, err := trigger.Execute(context.TODO(), map[string]*v1alpha1.Event{}, trigger.Trigger.Template.Email) + assert.NotNil(t, err) + }) + + t.Run("Error when sending email", func(t *testing.T) { + trigger := getEmailTrigger(&MockNotificationServiceError{}) + _, err := trigger.Execute(context.TODO(), map[string]*v1alpha1.Event{}, trigger.Trigger.Template.Email) + assert.NotNil(t, err) + }) + + t.Run("Email send successfully", func(t *testing.T) { + trigger := getEmailTrigger(&MockNotificationService{}) + _, err := trigger.Execute(context.TODO(), map[string]*v1alpha1.Event{}, trigger.Trigger.Template.Email) + assert.Nil(t, err) + }) +} diff --git a/sensors/triggers/fetch.go b/sensors/triggers/fetch.go index c3def378df..3b003c38fb 100644 --- a/sensors/triggers/fetch.go +++ 
b/sensors/triggers/fetch.go @@ -17,7 +17,8 @@ limitations under the License. package triggers import ( - "github.com/pkg/errors" + "fmt" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" @@ -26,7 +27,7 @@ import ( func FetchKubernetesResource(source *v1alpha1.ArtifactLocation) (*unstructured.Unstructured, error) { if source == nil { - return nil, errors.Errorf("trigger source for k8s is empty") + return nil, fmt.Errorf("trigger source for k8s is empty") } creds, err := artifacts.GetCredentials(source) if err != nil { @@ -36,6 +37,9 @@ func FetchKubernetesResource(source *v1alpha1.ArtifactLocation) (*unstructured.U if err != nil { return nil, err } + + // uObj will either hold the resource definition stored in the trigger or just + // a stub to provide enough information to fetch the object from the K8s cluster uObj, err := artifacts.FetchArtifact(reader) if err != nil { return nil, err diff --git a/sensors/triggers/http/http.go b/sensors/triggers/http/http.go index ebc3f8543b..7449f95b9a 100644 --- a/sensors/triggers/http/http.go +++ b/sensors/triggers/http/http.go @@ -19,10 +19,10 @@ import ( "bytes" "context" "encoding/json" + "fmt" "net/http" "time" - "github.com/pkg/errors" "go.uber.org/zap" "github.com/argoproj/argo-events/common" @@ -46,17 +46,17 @@ type HTTPTrigger struct { } // NewHTTPTrigger returns a new HTTP trigger -func NewHTTPTrigger(httpClients map[string]*http.Client, sensor *v1alpha1.Sensor, trigger *v1alpha1.Trigger, logger *zap.SugaredLogger) (*HTTPTrigger, error) { +func NewHTTPTrigger(httpClients common.StringKeyedMap[*http.Client], sensor *v1alpha1.Sensor, trigger *v1alpha1.Trigger, logger *zap.SugaredLogger) (*HTTPTrigger, error) { httptrigger := trigger.Template.HTTP - client, ok := httpClients[trigger.Template.Name] + client, ok := httpClients.Load(trigger.Template.Name) if !ok { client = &http.Client{} if httptrigger.TLS != nil { tlsConfig, err := common.GetTLSConfig(httptrigger.TLS) if err != nil { - return nil, errors.Wrap(err, "failed to get the tls configuration") + return nil, fmt.Errorf("failed to get the tls configuration, %w", err) } client.Transport = &http.Transport{ TLSClientConfig: tlsConfig, @@ -69,7 +69,7 @@ func NewHTTPTrigger(httpClients map[string]*http.Client, sensor *v1alpha1.Sensor } client.Timeout = timeout - httpClients[trigger.Template.Name] = client + httpClients.Store(trigger.Template.Name, client) } return &HTTPTrigger{ @@ -98,12 +98,12 @@ func (t *HTTPTrigger) FetchResource(ctx context.Context) (interface{}, error) { func (t *HTTPTrigger) ApplyResourceParameters(events map[string]*v1alpha1.Event, resource interface{}) (interface{}, error) { fetchedResource, ok := resource.(*v1alpha1.HTTPTrigger) if !ok { - return nil, errors.New("failed to interpret the fetched trigger resource") + return nil, fmt.Errorf("failed to interpret the fetched trigger resource") } resourceBytes, err := json.Marshal(fetchedResource) if err != nil { - return nil, errors.Wrap(err, "failed to marshal the http trigger resource") + return nil, fmt.Errorf("failed to marshal the http trigger resource, %w", err) } parameters := fetchedResource.Parameters if parameters != nil { @@ -113,7 +113,7 @@ func (t *HTTPTrigger) ApplyResourceParameters(events map[string]*v1alpha1.Event, } var ht *v1alpha1.HTTPTrigger if err := json.Unmarshal(updatedResourceBytes, &ht); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal the updated http trigger resource after applying resource parameters") + return nil, 
fmt.Errorf("failed to unmarshal the updated http trigger resource after applying resource parameters, %w", err) } return ht, nil } @@ -127,7 +127,7 @@ func (t *HTTPTrigger) Execute(ctx context.Context, events map[string]*v1alpha1.E trigger, ok := resource.(*v1alpha1.HTTPTrigger) if !ok { - return nil, errors.New("failed to interpret the trigger resource") + return nil, fmt.Errorf("failed to interpret the trigger resource") } if (trigger.Method == http.MethodPost || trigger.Method == http.MethodPatch || trigger.Method == http.MethodPut) && trigger.Payload == nil { @@ -143,7 +143,7 @@ func (t *HTTPTrigger) Execute(ctx context.Context, events map[string]*v1alpha1.E request, err := http.NewRequest(trigger.Method, trigger.URL, bytes.NewReader(payload)) if err != nil { - return nil, errors.Wrapf(err, "failed to construct request for %s", trigger.URL) + return nil, fmt.Errorf("failed to construct request for %s, %w", trigger.URL, err) } if trigger.Headers != nil { @@ -162,7 +162,7 @@ func (t *HTTPTrigger) Execute(ctx context.Context, events map[string]*v1alpha1.E value, err = common.GetConfigMapFromVolume(secure.ValueFrom.ConfigMapKeyRef) } if err != nil { - return nil, errors.Wrap(err, "failed to retrieve the value for secureHeader") + return nil, fmt.Errorf("failed to retrieve the value for secureHeader, %w", err) } request.Header[secure.Name] = []string{value} } @@ -177,21 +177,21 @@ func (t *HTTPTrigger) Execute(ctx context.Context, events map[string]*v1alpha1.E if basicAuth.Username != nil { username, err = common.GetSecretFromVolume(basicAuth.Username) if err != nil { - return nil, errors.Wrap(err, "failed to retrieve the username") + return nil, fmt.Errorf("failed to retrieve the username, %w", err) } } if basicAuth.Password != nil { password, err = common.GetSecretFromVolume(basicAuth.Password) if !ok { - return nil, errors.Wrap(err, "failed to retrieve the password") + return nil, fmt.Errorf("failed to retrieve the password, %w", err) } } request.SetBasicAuth(username, password) } - t.Logger.Infow("making a http request...", zap.Any("url", trigger.URL)) + t.Logger.Infow("Making a http request...", zap.Any("url", trigger.URL)) return t.Client.Do(request) } @@ -203,7 +203,7 @@ func (t *HTTPTrigger) ApplyPolicy(ctx context.Context, resource interface{}) err } response, ok := resource.(*http.Response) if !ok { - return errors.New("failed to interpret the trigger execution response") + return fmt.Errorf("failed to interpret the trigger execution response") } p := policy.NewStatusPolicy(response.StatusCode, t.Trigger.Policy.Status.GetAllow()) diff --git a/sensors/triggers/kafka/kafka.go b/sensors/triggers/kafka/kafka.go index 5dff393702..6d26bf0adf 100644 --- a/sensors/triggers/kafka/kafka.go +++ b/sensors/triggers/kafka/kafka.go @@ -17,12 +17,16 @@ package kafka import ( "context" + "encoding/binary" "encoding/json" + "fmt" "strings" "time" - "github.com/Shopify/sarama" - "github.com/pkg/errors" + "github.com/hamba/avro" + "github.com/riferrei/srclient" + + "github.com/IBM/sarama" "go.uber.org/zap" "github.com/argoproj/argo-events/common" @@ -42,13 +46,18 @@ type KafkaTrigger struct { Producer sarama.AsyncProducer // Logger to log stuff Logger *zap.SugaredLogger + // Avro schema of message + schema *srclient.Schema } // NewKafkaTrigger returns a new kafka trigger context. 
-func NewKafkaTrigger(sensor *v1alpha1.Sensor, trigger *v1alpha1.Trigger, kafkaProducers map[string]sarama.AsyncProducer, logger *zap.SugaredLogger) (*KafkaTrigger, error) { +func NewKafkaTrigger(sensor *v1alpha1.Sensor, trigger *v1alpha1.Trigger, kafkaProducers common.StringKeyedMap[sarama.AsyncProducer], logger *zap.SugaredLogger) (*KafkaTrigger, error) { kafkatrigger := trigger.Template.Kafka + triggerLogger := logger.With(logging.LabelTriggerType, apicommon.KafkaTrigger) + + producer, ok := kafkaProducers.Load(trigger.Template.Name) + var schema *srclient.Schema - producer, ok := kafkaProducers[trigger.Template.Name] if !ok { var err error config := sarama.NewConfig() @@ -58,7 +67,7 @@ func NewKafkaTrigger(sensor *v1alpha1.Sensor, trigger *v1alpha1.Trigger, kafkaPr } else { version, err := sarama.ParseKafkaVersion(kafkatrigger.Version) if err != nil { - return nil, errors.Wrap(err, "failed to parse Kafka version") + return nil, fmt.Errorf("failed to parse Kafka version, %w", err) } config.Version = version } @@ -66,16 +75,21 @@ func NewKafkaTrigger(sensor *v1alpha1.Sensor, trigger *v1alpha1.Trigger, kafkaPr if kafkatrigger.SASL != nil { config.Net.SASL.Enable = true config.Net.SASL.Mechanism = sarama.SASLMechanism(kafkatrigger.SASL.GetMechanism()) + if config.Net.SASL.Mechanism == "SCRAM-SHA-512" { + config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &common.XDGSCRAMClient{HashGeneratorFcn: common.SHA512New} } + } else if config.Net.SASL.Mechanism == "SCRAM-SHA-256" { + config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &common.XDGSCRAMClient{HashGeneratorFcn: common.SHA256New} } + } user, err := common.GetSecretFromVolume(kafkatrigger.SASL.UserSecret) if err != nil { - return nil, errors.Wrap(err, "Error getting user value from secret") + return nil, fmt.Errorf("error getting user value from secret, %w", err) } config.Net.SASL.User = user password, err := common.GetSecretFromVolume(kafkatrigger.SASL.PasswordSecret) if err != nil { - return nil, errors.Wrap(err, "Error getting password value from secret") + return nil, fmt.Errorf("error getting password value from secret, %w", err) } config.Net.SASL.Password = password } @@ -83,7 +97,7 @@ func NewKafkaTrigger(sensor *v1alpha1.Sensor, trigger *v1alpha1.Trigger, kafkaPr if kafkatrigger.TLS != nil { tlsConfig, err := common.GetTLSConfig(kafkatrigger.TLS) if err != nil { - return nil, errors.Wrap(err, "failed to get the tls configuration") + return nil, fmt.Errorf("failed to get the tls configuration, %w", err) } tlsConfig.InsecureSkipVerify = true config.Net.TLS.Config = tlsConfig @@ -112,14 +126,30 @@ func NewKafkaTrigger(sensor *v1alpha1.Sensor, trigger *v1alpha1.Trigger, kafkaPr return nil, err } - kafkaProducers[trigger.Template.Name] = producer + // must read from the Errors() channel or the async producer will deadlock. 
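As the comment above says, sarama's AsyncProducer delivers failed sends on its Errors() channel, and with config.Producer.Return.Errors at its default of true that channel must be consumed or the producer eventually blocks; the goroutine added just below is that drain. A standalone sketch of the same lifecycle (broker address and topic are placeholders):

package main

import (
	"log"

	"github.com/IBM/sarama"
)

func main() {
	config := sarama.NewConfig()
	producer, err := sarama.NewAsyncProducer([]string{"localhost:9092"}, config) // placeholder broker
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	// Drain Errors() so an unread produce failure can never stall Input().
	go func() {
		for perr := range producer.Errors() {
			log.Printf("failed to produce message: %v", perr.Err)
		}
	}()

	producer.Input() <- &sarama.ProducerMessage{
		Topic: "fake-topic", // placeholder topic
		Value: sarama.ByteEncoder([]byte(`{"hello":"world"}`)),
	}
}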
+ go func() { + for err := range producer.Errors() { + triggerLogger.Errorf("Error happened in kafka producer, %v", err) + } + }() + + kafkaProducers.Store(trigger.Template.Name, producer) + } + + if kafkatrigger.SchemaRegistry != nil { + var err error + schema, err = getSchemaFromRegistry(kafkatrigger.SchemaRegistry) + if err != nil { + return nil, err + } } return &KafkaTrigger{ Sensor: sensor, Trigger: trigger, Producer: producer, - Logger: logger.With(logging.LabelTriggerType, apicommon.KafkaTrigger), + Logger: triggerLogger, + schema: schema, }, nil } @@ -138,12 +168,12 @@ func (t *KafkaTrigger) FetchResource(ctx context.Context) (interface{}, error) { func (t *KafkaTrigger) ApplyResourceParameters(events map[string]*v1alpha1.Event, resource interface{}) (interface{}, error) { fetchedResource, ok := resource.(*v1alpha1.KafkaTrigger) if !ok { - return nil, errors.New("failed to interpret the fetched trigger resource") + return nil, fmt.Errorf("failed to interpret the fetched trigger resource") } resourceBytes, err := json.Marshal(fetchedResource) if err != nil { - return nil, errors.Wrap(err, "failed to marshal the kafka trigger resource") + return nil, fmt.Errorf("failed to marshal the kafka trigger resource, %w", err) } parameters := fetchedResource.Parameters if parameters != nil { @@ -153,7 +183,7 @@ func (t *KafkaTrigger) ApplyResourceParameters(events map[string]*v1alpha1.Event } var ht *v1alpha1.KafkaTrigger if err := json.Unmarshal(updatedResourceBytes, &ht); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal the updated kafka trigger resource after applying resource parameters") + return nil, fmt.Errorf("failed to unmarshal the updated kafka trigger resource after applying resource parameters, %w", err) } return ht, nil } @@ -164,11 +194,11 @@ func (t *KafkaTrigger) ApplyResourceParameters(events map[string]*v1alpha1.Event func (t *KafkaTrigger) Execute(ctx context.Context, events map[string]*v1alpha1.Event, resource interface{}) (interface{}, error) { trigger, ok := resource.(*v1alpha1.KafkaTrigger) if !ok { - return nil, errors.New("failed to interpret the trigger resource") + return nil, fmt.Errorf("failed to interpret the trigger resource") } if trigger.Payload == nil { - return nil, errors.New("payload parameters are not specified") + return nil, fmt.Errorf("payload parameters are not specified") } payload, err := triggers.ConstructPayload(events, trigger.Payload) @@ -176,20 +206,27 @@ func (t *KafkaTrigger) Execute(ctx context.Context, events map[string]*v1alpha1. return nil, err } - pk := trigger.PartitioningKey - if pk == "" { - pk = trigger.URL + // Producer with avro schema + if t.schema != nil { + payload, err = avroParser(t.schema.Schema(), t.schema.ID(), payload) + if err != nil { + return nil, err + } } - t.Producer.Input() <- &sarama.ProducerMessage{ + msg := &sarama.ProducerMessage{ Topic: trigger.Topic, - Key: sarama.StringEncoder(pk), Value: sarama.ByteEncoder(payload), - Partition: trigger.Partition, Timestamp: time.Now().UTC(), } - t.Logger.Infow("successfully produced a message", zap.Any("topic", trigger.Topic), zap.Any("partition", trigger.Partition)) + if trigger.PartitioningKey != nil { + msg.Key = sarama.StringEncoder(*trigger.PartitioningKey) + } + + t.Producer.Input() <- msg + + t.Logger.Infow("successfully produced a message", zap.Any("topic", trigger.Topic)) return nil, nil } @@ -198,3 +235,45 @@ func (t *KafkaTrigger) Execute(ctx context.Context, events map[string]*v1alpha1. 
func (t *KafkaTrigger) ApplyPolicy(ctx context.Context, resource interface{}) error { return nil } + +func avroParser(schema string, schemaID int, payload []byte) ([]byte, error) { + var recordValue []byte + var payloadNative map[string]interface{} + + schemaAvro, err := avro.Parse(schema) + if err != nil { + return nil, err + } + + err = json.Unmarshal(payload, &payloadNative) + if err != nil { + return nil, err + } + avroNative, err := avro.Marshal(schemaAvro, payloadNative) + if err != nil { + return nil, err + } + + schemaIDBytes := make([]byte, 4) + binary.BigEndian.PutUint32(schemaIDBytes, uint32(schemaID)) + recordValue = append(recordValue, byte(0)) + recordValue = append(recordValue, schemaIDBytes...) + recordValue = append(recordValue, avroNative...) + + return recordValue, nil +} + +// getSchemaFromRegistry returns a schema from registry. +func getSchemaFromRegistry(sr *apicommon.SchemaRegistryConfig) (*srclient.Schema, error) { + schemaRegistryClient := srclient.CreateSchemaRegistryClient(sr.URL) + if sr.Auth.Username != nil && sr.Auth.Password != nil { + user, _ := common.GetSecretFromVolume(sr.Auth.Username) + password, _ := common.GetSecretFromVolume(sr.Auth.Password) + schemaRegistryClient.SetCredentials(user, password) + } + schema, err := schemaRegistryClient.GetSchema(int(sr.SchemaID)) + if err != nil { + return nil, fmt.Errorf("error getting the schema with id '%d', %w", sr.SchemaID, err) + } + return schema, nil +} diff --git a/sensors/triggers/kafka/kafka_test.go b/sensors/triggers/kafka/kafka_test.go index 69d35a28f7..92674eae7b 100644 --- a/sensors/triggers/kafka/kafka_test.go +++ b/sensors/triggers/kafka/kafka_test.go @@ -19,12 +19,13 @@ import ( "context" "testing" - "github.com/Shopify/sarama" - "github.com/Shopify/sarama/mocks" + "github.com/IBM/sarama" + "github.com/IBM/sarama/mocks" cloudevents "github.com/cloudevents/sdk-go/v2" "github.com/stretchr/testify/assert" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/argoproj/argo-events/common" "github.com/argoproj/argo-events/common/logging" apicommon "github.com/argoproj/argo-events/pkg/apis/common" "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" @@ -43,7 +44,6 @@ var sensorObj = &v1alpha1.Sensor{ Kafka: &v1alpha1.KafkaTrigger{ URL: "fake-kafka-url", Topic: "fake-topic", - Partition: 0, Parameters: nil, RequiredAcks: 1, Compress: false, @@ -53,7 +53,7 @@ var sensorObj = &v1alpha1.Sensor{ }, TLS: nil, Payload: nil, - PartitioningKey: "", + PartitioningKey: nil, }, }, }, @@ -61,15 +61,14 @@ var sensorObj = &v1alpha1.Sensor{ }, } -func getFakeKafkaTrigger(producers map[string]sarama.AsyncProducer) (*KafkaTrigger, error) { +func getFakeKafkaTrigger(producers common.StringKeyedMap[sarama.AsyncProducer]) (*KafkaTrigger, error) { return NewKafkaTrigger(sensorObj.DeepCopy(), sensorObj.Spec.Triggers[0].DeepCopy(), producers, logging.NewArgoEventsLogger()) } func TestNewKafkaTrigger(t *testing.T) { producer := mocks.NewAsyncProducer(t, nil) - producers := map[string]sarama.AsyncProducer{ - "fake-trigger": producer, - } + producers := common.NewStringKeyedMap[sarama.AsyncProducer]() + producers.Store("fake-trigger", producer) trigger, err := NewKafkaTrigger(sensorObj.DeepCopy(), sensorObj.Spec.Triggers[0].DeepCopy(), producers, logging.NewArgoEventsLogger()) assert.Nil(t, err) @@ -79,9 +78,9 @@ func TestNewKafkaTrigger(t *testing.T) { func TestKafkaTrigger_FetchResource(t *testing.T) { producer := mocks.NewAsyncProducer(t, nil) - trigger, err := getFakeKafkaTrigger(map[string]sarama.AsyncProducer{ - 
"fake-trigger": producer, - }) + producers := common.NewStringKeyedMap[sarama.AsyncProducer]() + producers.Store("fake-trigger", producer) + trigger, err := getFakeKafkaTrigger(producers) assert.Nil(t, err) obj, err := trigger.FetchResource(context.TODO()) assert.Nil(t, err) @@ -93,9 +92,9 @@ func TestKafkaTrigger_FetchResource(t *testing.T) { func TestKafkaTrigger_ApplyResourceParameters(t *testing.T) { producer := mocks.NewAsyncProducer(t, nil) - trigger, err := getFakeKafkaTrigger(map[string]sarama.AsyncProducer{ - "fake-trigger": producer, - }) + producers := common.NewStringKeyedMap[sarama.AsyncProducer]() + producers.Store("fake-trigger", producer) + trigger, err := getFakeKafkaTrigger(producers) assert.Nil(t, err) testEvents := map[string]*v1alpha1.Event{ @@ -137,9 +136,9 @@ func TestKafkaTrigger_ApplyResourceParameters(t *testing.T) { func TestKafkaTrigger_Execute(t *testing.T) { producer := mocks.NewAsyncProducer(t, nil) - trigger, err := getFakeKafkaTrigger(map[string]sarama.AsyncProducer{ - "fake-trigger": producer, - }) + producers := common.NewStringKeyedMap[sarama.AsyncProducer]() + producers.Store("fake-trigger", producer) + trigger, err := getFakeKafkaTrigger(producers) assert.Nil(t, err) testEvents := map[string]*v1alpha1.Event{ "fake-dependency": { diff --git a/sensors/triggers/log/log.go b/sensors/triggers/log/log.go index 570d58a060..2278a61dd8 100644 --- a/sensors/triggers/log/log.go +++ b/sensors/triggers/log/log.go @@ -2,7 +2,7 @@ package log import ( "context" - "errors" + "fmt" "time" "go.uber.org/zap" @@ -39,7 +39,7 @@ func (t *LogTrigger) ApplyResourceParameters(_ map[string]*v1alpha1.Event, resou func (t *LogTrigger) Execute(ctx context.Context, events map[string]*v1alpha1.Event, resource interface{}) (interface{}, error) { log, ok := resource.(*v1alpha1.LogTrigger) if !ok { - return nil, errors.New("failed to interpret the fetched trigger resource") + return nil, fmt.Errorf("failed to interpret the fetched trigger resource") } if t.shouldLog(log) { for dependencyName, event := range events { diff --git a/sensors/triggers/nats/nats.go b/sensors/triggers/nats/nats.go index 511c9d708c..602142331e 100644 --- a/sensors/triggers/nats/nats.go +++ b/sensors/triggers/nats/nats.go @@ -18,9 +18,9 @@ package nats import ( "context" "encoding/json" + "fmt" - natslib "github.com/nats-io/go-nats" - "github.com/pkg/errors" + natslib "github.com/nats-io/nats.go" "go.uber.org/zap" "github.com/argoproj/argo-events/common" @@ -43,10 +43,10 @@ type NATSTrigger struct { } // NewNATSTrigger returns new nats trigger. 
-func NewNATSTrigger(sensor *v1alpha1.Sensor, trigger *v1alpha1.Trigger, natsConnections map[string]*natslib.Conn, logger *zap.SugaredLogger) (*NATSTrigger, error) { +func NewNATSTrigger(sensor *v1alpha1.Sensor, trigger *v1alpha1.Trigger, natsConnections common.StringKeyedMap[*natslib.Conn], logger *zap.SugaredLogger) (*NATSTrigger, error) { natstrigger := trigger.Template.NATS - conn, ok := natsConnections[trigger.Template.Name] + conn, ok := natsConnections.Load(trigger.Template.Name) if !ok { var err error opts := natslib.GetDefaultOptions() @@ -55,7 +55,7 @@ func NewNATSTrigger(sensor *v1alpha1.Sensor, trigger *v1alpha1.Trigger, natsConn if natstrigger.TLS != nil { tlsConfig, err := common.GetTLSConfig(natstrigger.TLS) if err != nil { - return nil, errors.Wrap(err, "failed to get the tls configuration") + return nil, fmt.Errorf("failed to get the tls configuration, %w", err) } tlsConfig.InsecureSkipVerify = true opts.Secure = true @@ -67,7 +67,7 @@ func NewNATSTrigger(sensor *v1alpha1.Sensor, trigger *v1alpha1.Trigger, natsConn return nil, err } - natsConnections[trigger.Template.Name] = conn + natsConnections.Store(trigger.Template.Name, conn) } return &NATSTrigger{ @@ -93,12 +93,12 @@ func (t *NATSTrigger) FetchResource(ctx context.Context) (interface{}, error) { func (t *NATSTrigger) ApplyResourceParameters(events map[string]*v1alpha1.Event, resource interface{}) (interface{}, error) { fetchedResource, ok := resource.(*v1alpha1.NATSTrigger) if !ok { - return nil, errors.New("failed to interpret the fetched trigger resource") + return nil, fmt.Errorf("failed to interpret the fetched trigger resource") } resourceBytes, err := json.Marshal(fetchedResource) if err != nil { - return nil, errors.Wrap(err, "failed to marshal the nats trigger resource") + return nil, fmt.Errorf("failed to marshal the nats trigger resource, %w", err) } parameters := fetchedResource.Parameters if parameters != nil { @@ -108,7 +108,7 @@ func (t *NATSTrigger) ApplyResourceParameters(events map[string]*v1alpha1.Event, } var ht *v1alpha1.NATSTrigger if err := json.Unmarshal(updatedResourceBytes, &ht); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal the updated nats trigger resource after applying resource parameters") + return nil, fmt.Errorf("failed to unmarshal the updated nats trigger resource after applying resource parameters, %w", err) } return ht, nil } @@ -119,11 +119,11 @@ func (t *NATSTrigger) ApplyResourceParameters(events map[string]*v1alpha1.Event, func (t *NATSTrigger) Execute(ctx context.Context, events map[string]*v1alpha1.Event, resource interface{}) (interface{}, error) { trigger, ok := resource.(*v1alpha1.NATSTrigger) if !ok { - return nil, errors.New("failed to interpret the trigger resource") + return nil, fmt.Errorf("failed to interpret the trigger resource") } if trigger.Payload == nil { - return nil, errors.New("payload parameters are not specified") + return nil, fmt.Errorf("payload parameters are not specified") } payload, err := triggers.ConstructPayload(events, trigger.Payload) diff --git a/sensors/triggers/params.go b/sensors/triggers/params.go index a8ca3066e7..9334e9b431 100644 --- a/sensors/triggers/params.go +++ b/sensors/triggers/params.go @@ -24,7 +24,6 @@ import ( sprig "github.com/Masterminds/sprig/v3" "github.com/ghodss/yaml" - "github.com/pkg/errors" "github.com/tidwall/gjson" "github.com/tidwall/sjson" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -33,22 +32,34 @@ import ( "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" ) +const ( + 
jsonType   = "JSON"
+	stringType = "String"
+)
+
 // ConstructPayload constructs a payload for operations involving request and responses like HTTP request.
 func ConstructPayload(events map[string]*v1alpha1.Event, parameters []v1alpha1.TriggerParameter) ([]byte, error) {
 	var payload []byte
 
 	for _, parameter := range parameters {
-		value, err := ResolveParamValue(parameter.Src, events)
+		value, typ, err := ResolveParamValue(parameter.Src, events)
 		if err != nil {
 			return nil, err
 		}
-		tmp, err := sjson.SetBytes(payload, parameter.Dest, value)
-		if err != nil {
-			return nil, err
+		if typ != stringType && parameter.Src.UseRawData {
+			tmp, err := sjson.SetRawBytes(payload, parameter.Dest, []byte(*value))
+			if err != nil {
+				return nil, err
+			}
+			payload = tmp
+		} else {
+			tmp, err := sjson.SetBytes(payload, parameter.Dest, *value)
+			if err != nil {
+				return nil, err
+			}
+			payload = tmp
 		}
-		payload = tmp
 	}
-
 	return payload, nil
 }
@@ -95,10 +106,13 @@ func ApplyResourceParameters(events map[string]*v1alpha1.Event, parameters []v1a
 func ApplyParams(jsonObj []byte, params []v1alpha1.TriggerParameter, events map[string]*v1alpha1.Event) ([]byte, error) {
 	for _, param := range params {
 		// let's grab the param value
-		v, err := ResolveParamValue(param.Src, events)
+		value, typ, err := ResolveParamValue(param.Src, events)
 		if err != nil {
 			return nil, err
 		}
+		if value == nil {
+			continue
+		}
 
 		switch op := param.Operation; op {
 		case v1alpha1.TriggerParameterOpAppend, v1alpha1.TriggerParameterOpPrepend:
@@ -106,10 +120,11 @@ func ApplyParams(jsonObj []byte, params []v1alpha1.TriggerParameter, events map[
 			current := gjson.GetBytes(jsonObj, param.Dest)
 
 			if current.Exists() {
+				typ = stringType
 				if op == v1alpha1.TriggerParameterOpAppend {
-					v = current.String() + v
+					*value = current.String() + *value
 				} else {
-					v += current.String()
+					*value += current.String()
 				}
 			}
 		case v1alpha1.TriggerParameterOpOverwrite, v1alpha1.TriggerParameterOpNone:
@@ -117,13 +132,20 @@ func ApplyParams(jsonObj []byte, params []v1alpha1.TriggerParameter, events map[
 		default:
 			return nil, fmt.Errorf("unsupported trigger parameter operation: %+v", op)
 		}
-		// now let's set the value
-		tmp, err := sjson.SetBytes(jsonObj, param.Dest, v)
-		if err != nil {
-			return nil, err
+		if typ != stringType && param.Src.UseRawData {
+			tmp, err := sjson.SetRawBytes(jsonObj, param.Dest, []byte(*value))
+			if err != nil {
+				return nil, err
+			}
+			jsonObj = tmp
+		} else {
+			tmp, err := sjson.SetBytes(jsonObj, param.Dest, *value)
+			if err != nil {
+				return nil, err
+			}
+			jsonObj = tmp
 		}
-		jsonObj = tmp
 	}
 	return jsonObj, nil
 }
@@ -158,68 +180,102 @@ func renderEventDataAsJSON(event *v1alpha1.Event) ([]byte, error) {
 }
 
 // helper method to resolve the parameter's value from the src
+// returns value and value type (jsonType or stringType or empty string if not found). jsonType represents a block while stringType represents a single value.
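ConstructPayload and ApplyParams above branch on the resolved value's type plus UseRawData: JSON blocks are injected verbatim with sjson.SetRawBytes, while everything else is stringified with sjson.SetBytes. A standalone sketch of the difference, using only the tidwall/sjson API already imported here (illustrative, not part of this change):

	package main

	import (
		"fmt"

		"github.com/tidwall/sjson"
	)

	func main() {
		block := `{"first": "fake", "last": "user"}`

		// SetBytes quotes the value: {"name":"{\"first\": \"fake\", \"last\": \"user\"}"}
		quoted, _ := sjson.SetBytes([]byte(`{}`), "name", block)

		// SetRawBytes injects it as raw JSON: {"name":{"first": "fake", "last": "user"}}
		raw, _ := sjson.SetRawBytes([]byte(`{}`), "name", []byte(block))

		fmt.Println(string(quoted))
		fmt.Println(string(raw))
	}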
// returns an error if the Path is invalid/not found and the default value is nil OR if the eventDependency event doesn't exist and default value is nil -func ResolveParamValue(src *v1alpha1.TriggerParameterSource, events map[string]*v1alpha1.Event) (string, error) { +func ResolveParamValue(src *v1alpha1.TriggerParameterSource, events map[string]*v1alpha1.Event) (*string, string, error) { var err error - var value []byte + var eventPayload []byte var key string var tmplt string - if event, ok := events[src.DependencyName]; ok { - // If context or data keys are not set, return the event payload as is + var resultValue string + + event, eventExists := events[src.DependencyName] + switch { + case eventExists: + // If no data or context selection was provided if src.ContextKey == "" && src.DataKey == "" && src.DataTemplate == "" && src.ContextTemplate == "" { - value, err = json.Marshal(&event) + // Return default value if exists + if src.Value != nil { + resultValue = *src.Value + } else { + // Default value doesn't exist so return the whole event payload + eventPayload, err = json.Marshal(&event) + resultValue = string(eventPayload) + } + + if err == nil { + return &resultValue, stringType, nil + } } - // Get the context bytes + + // Get the context part of the payload if src.ContextKey != "" || src.ContextTemplate != "" { key = src.ContextKey tmplt = src.ContextTemplate - value, err = json.Marshal(&event.Context) + eventPayload, err = json.Marshal(&event.Context) } - // Get the payload bytes + + // Get the data part of the payload if src.DataKey != "" || src.DataTemplate != "" { key = src.DataKey tmplt = src.DataTemplate - value, err = renderEventDataAsJSON(event) + eventPayload, err = renderEventDataAsJSON(event) } - } else if src.Value != nil { - return *src.Value, nil + case src.Value != nil: + // Use the default value set by the user in case the event is missing + resultValue = *src.Value + return &resultValue, stringType, nil + default: + // The parameter doesn't have a default value and is referencing a dependency that is + // missing in the received events. This is not an error and may happen with || conditions. + return nil, stringType, nil } + // If the event payload parsing failed if err != nil { + // Fall back to the default value in case it exists if src.Value != nil { - fmt.Printf("failed to parse the event data, using default value. err: %+v\n", err) - return *src.Value, nil + fmt.Printf("failed to parse the event payload, using default value. err: %+v\n", err) + resultValue = *src.Value + return &resultValue, stringType, nil } - return "", err + // Otherwise, return the error + return nil, "", err } - // Get the value corresponding to specified key within JSON object - if value != nil { + // Get the value corresponding to specified key or template within event payload + if eventPayload != nil { if tmplt != "" { - out, err := getValueWithTemplate(value, tmplt) + resultValue, err = getValueWithTemplate(eventPayload, tmplt) if err == nil { - return out, nil + return &resultValue, stringType, nil } fmt.Printf("failed to execute the src event template, falling back to key or value. 
err: %+v\n", err)
 		}
 		if key != "" {
-			res, err := getValueByKey(value, key)
+			tmp, typ, err := getValueByKey(eventPayload, key)
+			// For block injection support
+			resultValue = tmp
 			if err == nil {
-				return res, nil
+				return &resultValue, typ, nil
 			}
-			fmt.Printf("Failed to get value by key: %+v\n", err)
+			fmt.Printf("failed to get value by key: %+v\n", err)
 		}
+		// In case neither key nor template resolving was successful, fall back to the default value if one exists
 		if src.Value != nil {
-			return *src.Value, nil
+			resultValue = *src.Value
+			return &resultValue, stringType, nil
 		}
-		return string(value), nil
 	}
-	return "", errors.Wrapf(err, "unable to resolve '%s' parameter value", src.DependencyName)
+
+	// if we got here it means that both key and template did not match the event payload
+	// and no default value was provided, so we need to return an error
+	return nil, "", fmt.Errorf("unable to resolve '%s' parameter value. err: %+v", src.DependencyName, err)
 }
 
 // getValueWithTemplate will attempt to execute the provided template against
 // the raw json bytes and then returns the result or any error
 func getValueWithTemplate(value []byte, templString string) (string, error) {
 	res := gjson.ParseBytes(value)
-	tpl, err := template.New("param").Funcs(sprig.HermeticTxtFuncMap()).Parse(templString)
+	tpl, err := template.New("param").Funcs(sprig.FuncMap()).Parse(templString)
 	if err != nil {
 		return "", err
 	}
@@ -236,12 +292,16 @@ func getValueWithTemplate(value []byte, templString string) (string, error) {
 	return out, nil
 }
 
-// getValueByKey will return the value in the raw json bytes at the provided key,
+// getValueByKey will return the value at the provided key as raw json or a string, along with the value's type
+// (jsonType or stringType, or empty string; jsonType represents a block while stringType represents a single value),
 // or an error if it does not exist.
-func getValueByKey(value []byte, key string) (string, error) {
+func getValueByKey(value []byte, key string) (string, string, error) {
 	res := gjson.GetBytes(value, key)
 	if res.Exists() {
-		return res.String(), nil
+		if res.Type.String() == jsonType {
+			return res.Raw, res.Type.String(), nil
+		}
+		return res.String(), res.Type.String(), nil
 	}
-	return "", fmt.Errorf("key %s does not exist to in the event object\n", key)
+	return "", "", fmt.Errorf("key %s does not exist in the event payload", key)
 }
diff --git a/sensors/triggers/params_test.go b/sensors/triggers/params_test.go
index 70f73b1756..208324d98b 100644
--- a/sensors/triggers/params_test.go
+++ b/sensors/triggers/params_test.go
@@ -41,13 +41,7 @@ var sensorObj = &v1alpha1.Sensor{
 			{
 				Template: &v1alpha1.TriggerTemplate{
 					Name: "fake-trigger",
-					K8s: &v1alpha1.StandardK8STrigger{
-						GroupVersionResource: metav1.GroupVersionResource{
-							Group:    "apps",
-							Version:  "v1",
-							Resource: "deployments",
-						},
-					},
+					K8s:  &v1alpha1.StandardK8STrigger{},
 				},
 			},
 		},
@@ -77,9 +71,13 @@ type Details struct {
 }
 
 type Payload struct {
-	FirstName string  `json:"firstName"`
-	LastName  string  `json:"lastName"`
-	Details   Details `json:"details"`
+	FirstName        string  `json:"firstName"`
+	LastName         string  `json:"lastName"`
+	Age              int     `json:"age"`
+	IsActive         bool    `json:"isActive"`
+	TypelessAge      string  `json:"typelessAge"`
+	TypelessIsActive string  `json:"typelessIsActive"`
+	Details          Details `json:"details"`
 }
 
 func TestConstructPayload(t *testing.T) {
@@ -104,6 +102,16 @@ func TestConstructPayload(t *testing.T) {
 			},
 			Data: []byte("{\"lastName\": \"foo\"}"),
 		},
+		"use-event-data-type": {
+			Context: &v1alpha1.EventContext{
+				ID:              "3",
+				Type:            "calendar",
+				Source:          "calendar-gateway",
+				DataContentType: common.MediaTypeJSON,
+				Subject:         "example-1",
+			},
+			Data: []byte("{\"age\": 100, \"isActive\": false, \"countries\": [\"ca\", \"us\", \"mx\"]}"),
+		},
 	}
 
 	defaultFirstName := "faker"
@@ -126,6 +134,36 @@ func TestConstructPayload(t *testing.T) {
 			},
 			Dest: "lastName",
 		},
+		{
+			Src: &v1alpha1.TriggerParameterSource{
+				DependencyName: "use-event-data-type",
+				DataKey:        "age",
+				UseRawData:     true,
+			},
+			Dest: "age",
+		},
+		{
+			Src: &v1alpha1.TriggerParameterSource{
+				DependencyName: "use-event-data-type",
+				DataKey:        "isActive",
+				UseRawData:     true,
+			},
+			Dest: "isActive",
+		},
+		{
+			Src: &v1alpha1.TriggerParameterSource{
+				DependencyName: "use-event-data-type",
+				DataKey:        "age",
+			},
+			Dest: "typelessAge",
+		},
+		{
+			Src: &v1alpha1.TriggerParameterSource{
+				DependencyName: "use-event-data-type",
+				DataKey:        "isActive",
+			},
+			Dest: "typelessIsActive",
+		},
 	}
 
 	payloadBytes, err := ConstructPayload(testEvents, parameters)
@@ -137,6 +175,10 @@ func TestConstructPayload(t *testing.T) {
 	assert.Nil(t, err)
 	assert.Equal(t, "fake", p.FirstName)
 	assert.Equal(t, "foo", p.LastName)
+	assert.Equal(t, 100, p.Age)
+	assert.Equal(t, false, p.IsActive)
+	assert.Equal(t, "100", p.TypelessAge)
+	assert.Equal(t, "false", p.TypelessIsActive)
 
 	parameters[0].Src.DataKey = "unknown"
 	parameters[1].Src.DataKey = "unknown"
@@ -161,7 +203,7 @@ func TestResolveParamValue(t *testing.T) {
 			ID:   "1",
 			Time: metav1.Time{Time: time.Now().UTC()},
 		},
-		Data: []byte("{\"name\": {\"first\": \"fake\", \"last\": \"user\"} }"),
+		Data: []byte("{\"name\": {\"first\": \"fake\", \"last\": \"user\"}, \"reviews\": 8, \"rating\": 4.5, \"isActive\" : true, \"isVerified\" : false, \"countries\": [\"ca\", \"us\", \"mx\"]}"),
 	}
 	eventBody, err := json.Marshal(event)
 	assert.Nil(t, err)
@@ -276,13 +318,40 @@ func TestResolveParamValue(t
*testing.T) { }, result: "fake", }, + { + name: "UseRawData set to true - string", + source: &v1alpha1.TriggerParameterSource{ + DependencyName: "fake-dependency", + DataKey: "name.first", + UseRawData: true, + }, + result: "fake", + }, + { + name: "UseRawData set to true - json", + source: &v1alpha1.TriggerParameterSource{ + DependencyName: "fake-dependency", + DataKey: "name", + UseRawData: true, + }, + result: "{\"first\": \"fake\", \"last\": \"user\"}", + }, + { + name: "UseRawData set to true - list", + source: &v1alpha1.TriggerParameterSource{ + DependencyName: "fake-dependency", + DataKey: "countries", + UseRawData: true, + }, + result: "[\"ca\", \"us\", \"mx\"]", + }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - result, err := ResolveParamValue(test.source, events) + result, _, err := ResolveParamValue(test.source, events) assert.Nil(t, err) - assert.Equal(t, test.result, result) + assert.Equal(t, test.result, *result) }) } } @@ -331,7 +400,7 @@ func TestApplyParams(t *testing.T) { ID: "1", Time: metav1.Time{Time: time.Now().UTC()}, }, - Data: []byte("{\"name\": {\"first\": \"fake\", \"last\": \"user\"} }"), + Data: []byte("{\"name\": {\"first\": \"fake\", \"last\": \"user\"}, \"age\": 100, \"countries\": [\"ca\", \"us\", \"mx\"] }"), } events := map[string]*v1alpha1.Event{ @@ -404,6 +473,71 @@ func TestApplyParams(t *testing.T) { jsonObj: []byte("{\"name\": \"faker\"}"), result: []byte("{\"name\": \"fake\"}"), }, + { + name: "apply block parameters with overwrite operation - useRawDataValue false", + params: []v1alpha1.TriggerParameter{ + { + Src: &v1alpha1.TriggerParameterSource{ + DependencyName: "fake-dependency", + DataKey: "name", + }, + Dest: "name", + Operation: v1alpha1.TriggerParameterOpOverwrite, + }, + }, + jsonObj: []byte("{\"name\": \"faker\"}"), + result: []byte("{\"name\": \"{\\\"first\\\": \\\"fake\\\", \\\"last\\\": \\\"user\\\"}\"}"), + }, + { + name: "apply block parameters with overwrite operation - useRawDataValue true", + params: []v1alpha1.TriggerParameter{ + { + Src: &v1alpha1.TriggerParameterSource{ + DependencyName: "fake-dependency", + DataKey: "name", + UseRawData: true, + }, + Dest: "name", + Operation: v1alpha1.TriggerParameterOpOverwrite, + }, + }, + jsonObj: []byte("{\"name\": \"faker\"}"), + result: []byte("{\"name\": {\"first\": \"fake\", \"last\": \"user\"}}"), + }, + { + name: "Use raw data types", + params: []v1alpha1.TriggerParameter{ + { + Src: &v1alpha1.TriggerParameterSource{ + DependencyName: "fake-dependency", + DataKey: "age", + UseRawData: true, + }, + Dest: "age", + Operation: v1alpha1.TriggerParameterOpOverwrite, + }, + { + Src: &v1alpha1.TriggerParameterSource{ + DependencyName: "fake-dependency", + DataKey: "age", + UseRawData: true, + }, + Dest: "ageWithYears", + Operation: v1alpha1.TriggerParameterOpAppend, + }, + { + Src: &v1alpha1.TriggerParameterSource{ + DependencyName: "fake-dependency", + DataKey: "countries", + UseRawData: true, + }, + Dest: "countries", + Operation: v1alpha1.TriggerParameterOpAppend, + }, + }, + jsonObj: []byte("{\"age\": \"this-gets-over-written\", \"ageWithYears\": \"Years: \"}"), + result: []byte("{\"age\": 100, \"ageWithYears\": \"Years: 100\",\"countries\":[\"ca\", \"us\", \"mx\"]}"), + }, } for _, test := range tests { @@ -484,5 +618,4 @@ func TestApplyTemplateParameters(t *testing.T) { } err := ApplyTemplateParameters(testEvents, &obj.Spec.Triggers[0]) assert.Nil(t, err) - assert.Equal(t, "fake", obj.Spec.Triggers[0].Template.K8s.GroupVersionResource.Group) } diff --git 
a/sensors/triggers/pulsar/pulsar.go b/sensors/triggers/pulsar/pulsar.go new file mode 100644 index 0000000000..6b0956760c --- /dev/null +++ b/sensors/triggers/pulsar/pulsar.go @@ -0,0 +1,209 @@ +/* +Copyright 2021 BlackRock, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package pulsar + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/apache/pulsar-client-go/pulsar" + "go.uber.org/zap" + + "github.com/argoproj/argo-events/common" + "github.com/argoproj/argo-events/common/logging" + apicommon "github.com/argoproj/argo-events/pkg/apis/common" + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" + "github.com/argoproj/argo-events/sensors/triggers" +) + +// PulsarTrigger describes the trigger to place messages on Pulsar topic using a producer +type PulsarTrigger struct { + // Sensor object + Sensor *v1alpha1.Sensor + // Trigger reference + Trigger *v1alpha1.Trigger + // Pulsar async producer + Producer pulsar.Producer + // Logger to log stuff + Logger *zap.SugaredLogger +} + +// NewPulsarTrigger returns a new Pulsar trigger context. +func NewPulsarTrigger(sensor *v1alpha1.Sensor, trigger *v1alpha1.Trigger, pulsarProducers common.StringKeyedMap[pulsar.Producer], logger *zap.SugaredLogger) (*PulsarTrigger, error) { + pulsarTrigger := trigger.Template.Pulsar + + producer, ok := pulsarProducers.Load(trigger.Template.Name) + if !ok { + var err error + tlsTrustCertsFilePath := "" + if pulsarTrigger.TLSTrustCertsSecret != nil { + tlsTrustCertsFilePath, err = common.GetSecretVolumePath(pulsarTrigger.TLSTrustCertsSecret) + if err != nil { + logger.Errorw("failed to get TLSTrustCertsFilePath from the volume", zap.Error(err)) + return nil, err + } + } + clientOpt := pulsar.ClientOptions{ + URL: pulsarTrigger.URL, + TLSTrustCertsFilePath: tlsTrustCertsFilePath, + TLSAllowInsecureConnection: pulsarTrigger.TLSAllowInsecureConnection, + TLSValidateHostname: pulsarTrigger.TLSValidateHostname, + } + + if pulsarTrigger.AuthTokenSecret != nil { + token, err := common.GetSecretFromVolume(pulsarTrigger.AuthTokenSecret) + if err != nil { + logger.Errorw("failed to get AuthTokenSecret from the volume", zap.Error(err)) + return nil, err + } + clientOpt.Authentication = pulsar.NewAuthenticationToken(token) + } + + if len(pulsarTrigger.AuthAthenzParams) > 0 { + logger.Info("setting athenz auth option...") + if pulsarTrigger.AuthAthenzSecret != nil { + authAthenzFilePath, err := common.GetSecretVolumePath(pulsarTrigger.AuthAthenzSecret) + if err != nil { + logger.Errorw("failed to get authAthenzSecret from the volume", zap.Error(err)) + return nil, err + } + pulsarTrigger.AuthAthenzParams["privateKey"] = "file://" + authAthenzFilePath + } + clientOpt.Authentication = pulsar.NewAuthenticationAthenz(pulsarTrigger.AuthAthenzParams) + } + + if pulsarTrigger.TLS != nil { + logger.Info("setting tls auth option...") + var clientCertPath, clientKeyPath string + switch { + case pulsarTrigger.TLS.ClientCertSecret != nil && pulsarTrigger.TLS.ClientKeySecret != nil: + clientCertPath, err = 
common.GetSecretVolumePath(pulsarTrigger.TLS.ClientCertSecret) + if err != nil { + logger.Errorw("failed to get ClientCertPath from the volume", zap.Error(err)) + return nil, err + } + clientKeyPath, err = common.GetSecretVolumePath(pulsarTrigger.TLS.ClientKeySecret) + if err != nil { + logger.Errorw("failed to get ClientKeyPath from the volume", zap.Error(err)) + return nil, err + } + default: + return nil, fmt.Errorf("invalid TLS config") + } + clientOpt.Authentication = pulsar.NewAuthenticationTLS(clientCertPath, clientKeyPath) + } + + var client pulsar.Client + + if err := common.DoWithRetry(pulsarTrigger.ConnectionBackoff, func() error { + var err error + if client, err = pulsar.NewClient(clientOpt); err != nil { + return err + } + return nil + }); err != nil { + return nil, fmt.Errorf("failed to connect to %s for sensor %s, %w", pulsarTrigger.URL, trigger.Template.Name, err) + } + + producer, err = client.CreateProducer(pulsar.ProducerOptions{ + Topic: pulsarTrigger.Topic, + }) + if err != nil { + return nil, err + } + + pulsarProducers.Store(trigger.Template.Name, producer) + } + + return &PulsarTrigger{ + Sensor: sensor, + Trigger: trigger, + Producer: producer, + Logger: logger.With(logging.LabelTriggerType, apicommon.PulsarTrigger), + }, nil +} + +// GetTriggerType returns the type of the trigger +func (t *PulsarTrigger) GetTriggerType() apicommon.TriggerType { + return apicommon.PulsarTrigger +} + +// FetchResource fetches the trigger. As the Pulsar trigger is simply a Pulsar producer, there +// is no need to fetch any resource from external source +func (t *PulsarTrigger) FetchResource(ctx context.Context) (interface{}, error) { + return t.Trigger.Template.Pulsar, nil +} + +// ApplyResourceParameters applies parameters to the trigger resource +func (t *PulsarTrigger) ApplyResourceParameters(events map[string]*v1alpha1.Event, resource interface{}) (interface{}, error) { + fetchedResource, ok := resource.(*v1alpha1.PulsarTrigger) + if !ok { + return nil, fmt.Errorf("failed to interpret the fetched trigger resource") + } + + resourceBytes, err := json.Marshal(fetchedResource) + if err != nil { + return nil, fmt.Errorf("failed to marshal the pulsar trigger resource, %w", err) + } + + parameters := fetchedResource.Parameters + if parameters != nil { + updatedResourceBytes, err := triggers.ApplyParams(resourceBytes, parameters, events) + if err != nil { + return nil, err + } + var ht *v1alpha1.PulsarTrigger + if err := json.Unmarshal(updatedResourceBytes, &ht); err != nil { + return nil, fmt.Errorf("failed to unmarshal the updated pulsar trigger resource after applying resource parameters, %w", err) + } + return ht, nil + } + return resource, nil +} + +// Execute executes the trigger +func (t *PulsarTrigger) Execute(ctx context.Context, events map[string]*v1alpha1.Event, resource interface{}) (interface{}, error) { + trigger, ok := resource.(*v1alpha1.PulsarTrigger) + if !ok { + return nil, fmt.Errorf("failed to interpret the trigger resource") + } + + if trigger.Payload == nil { + return nil, fmt.Errorf("payload parameters are not specified") + } + + payload, err := triggers.ConstructPayload(events, trigger.Payload) + if err != nil { + return nil, err + } + + _, err = t.Producer.Send(ctx, &pulsar.ProducerMessage{ + Payload: payload, + }) + if err != nil { + return nil, fmt.Errorf("failed to send message to pulsar, %w", err) + } + + t.Logger.Infow("successfully produced a message", zap.Any("topic", trigger.Topic)) + + return nil, nil +} + +// ApplyPolicy applies policy on the trigger 
+func (t *PulsarTrigger) ApplyPolicy(ctx context.Context, resource interface{}) error { + return nil +} diff --git a/sensors/triggers/pulsar/pulsar_test.go b/sensors/triggers/pulsar/pulsar_test.go new file mode 100644 index 0000000000..8ea1aa16bb --- /dev/null +++ b/sensors/triggers/pulsar/pulsar_test.go @@ -0,0 +1,213 @@ +/* +Copyright 2021 BlackRock, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package pulsar + +import ( + "context" + "fmt" + "testing" + + "github.com/apache/pulsar-client-go/pulsar" + cloudevents "github.com/cloudevents/sdk-go/v2" + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/argoproj/argo-events/common" + "github.com/argoproj/argo-events/common/logging" + "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" +) + +type mockPulsarProducer struct { + topic string + name string + expected bool +} + +func (m *mockPulsarProducer) ExpectInputAndSucceed() { + m.expected = true +} +func (m *mockPulsarProducer) Topic() string { + return m.topic +} +func (m *mockPulsarProducer) Name() string { + return m.name +} +func (m *mockPulsarProducer) Send(context.Context, *pulsar.ProducerMessage) (pulsar.MessageID, error) { + if m.expected { + m.expected = false + return nil, nil + } + return nil, fmt.Errorf("input not expected") +} +func (m *mockPulsarProducer) SendAsync(context.Context, *pulsar.ProducerMessage, func(pulsar.MessageID, *pulsar.ProducerMessage, error)) { + +} +func (m *mockPulsarProducer) LastSequenceID() int64 { + return 0 +} +func (m *mockPulsarProducer) Flush() error { + return nil +} +func (m *mockPulsarProducer) Close() { + +} + +var sensorObj = &v1alpha1.Sensor{ + ObjectMeta: metav1.ObjectMeta{ + Name: "fake-sensor", + Namespace: "fake", + }, + Spec: v1alpha1.SensorSpec{ + Triggers: []v1alpha1.Trigger{ + { + Template: &v1alpha1.TriggerTemplate{ + Name: "fake-trigger", + Pulsar: &v1alpha1.PulsarTrigger{ + URL: "fake-pulsar-url", + Topic: "fake-topic", + Parameters: nil, + Payload: nil, + }, + }, + }, + }, + }, +} + +func getFakePulsarTrigger(producers common.StringKeyedMap[pulsar.Producer]) (*PulsarTrigger, error) { + return NewPulsarTrigger(sensorObj.DeepCopy(), sensorObj.Spec.Triggers[0].DeepCopy(), producers, logging.NewArgoEventsLogger()) +} + +func TestNewPulsarTrigger(t *testing.T) { + producer := &mockPulsarProducer{ + topic: "fake-topic", + name: "fake-producer", + } + producers := common.NewStringKeyedMap[pulsar.Producer]() + producers.Store("fake-trigger", producer) + trigger, err := NewPulsarTrigger(sensorObj.DeepCopy(), sensorObj.Spec.Triggers[0].DeepCopy(), producers, logging.NewArgoEventsLogger()) + assert.Nil(t, err) + assert.Equal(t, trigger.Trigger.Template.Pulsar.URL, "fake-pulsar-url") + assert.Equal(t, trigger.Trigger.Template.Pulsar.Topic, "fake-topic") +} + +func TestPulsarTrigger_FetchResource(t *testing.T) { + producer := &mockPulsarProducer{ + topic: "fake-topic", + name: "fake-producer", + } + producers := common.NewStringKeyedMap[pulsar.Producer]() + producers.Store("fake-trigger", producer) 
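The Kafka, NATS, and Pulsar triggers all keep their clients in a common.StringKeyedMap keyed by the trigger template name, with the same load-or-create shape inlined in each constructor. Written out as a generic helper (a sketch only; assumes Go 1.18+ generics and the Load/Store methods shown above):

	// getOrCreate returns the cached client for name, creating and caching it on
	// first use. T stands in for sarama.AsyncProducer, *natslib.Conn, pulsar.Producer, etc.
	func getOrCreate[T any](cache common.StringKeyedMap[T], name string, create func() (T, error)) (T, error) {
		if cached, ok := cache.Load(name); ok {
			return cached, nil
		}
		created, err := create()
		if err != nil {
			var zero T
			return zero, err
		}
		cache.Store(name, created)
		return created, nil
	}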
+ trigger, err := getFakePulsarTrigger(producers) + assert.Nil(t, err) + obj, err := trigger.FetchResource(context.TODO()) + assert.Nil(t, err) + assert.NotNil(t, obj) + trigger1, ok := obj.(*v1alpha1.PulsarTrigger) + assert.Equal(t, true, ok) + assert.Equal(t, trigger.Trigger.Template.Pulsar.URL, trigger1.URL) +} + +func TestPulsarTrigger_ApplyResourceParameters(t *testing.T) { + producer := &mockPulsarProducer{ + topic: "fake-topic", + name: "fake-producer", + } + producers := common.NewStringKeyedMap[pulsar.Producer]() + producers.Store("fake-trigger", producer) + trigger, err := getFakePulsarTrigger(producers) + assert.Nil(t, err) + + testEvents := map[string]*v1alpha1.Event{ + "fake-dependency": { + Context: &v1alpha1.EventContext{ + ID: "1", + Type: "webhook", + Source: "webhook-gateway", + DataContentType: "application/json", + SpecVersion: cloudevents.VersionV1, + Subject: "example-1", + }, + Data: []byte(`{"url": "another-fake-pulsar-url"}`), + }, + } + + defaultValue := "http://default.com" + + trigger.Trigger.Template.Pulsar.Parameters = []v1alpha1.TriggerParameter{ + { + Src: &v1alpha1.TriggerParameterSource{ + DependencyName: "fake-dependency", + DataKey: "url", + Value: &defaultValue, + }, + Dest: "url", + }, + } + + resource, err := trigger.ApplyResourceParameters(testEvents, trigger.Trigger.Template.Pulsar) + assert.Nil(t, err) + assert.NotNil(t, resource) + + updatedTrigger, ok := resource.(*v1alpha1.PulsarTrigger) + assert.Nil(t, err) + assert.Equal(t, true, ok) + assert.Equal(t, "another-fake-pulsar-url", updatedTrigger.URL) +} + +func TestPulsarTrigger_Execute(t *testing.T) { + producer := &mockPulsarProducer{ + topic: "fake-topic", + name: "fake-producer", + } + producers := common.NewStringKeyedMap[pulsar.Producer]() + producers.Store("fake-trigger", producer) + trigger, err := getFakePulsarTrigger(producers) + assert.Nil(t, err) + + testEvents := map[string]*v1alpha1.Event{ + "fake-dependency": { + Context: &v1alpha1.EventContext{ + ID: "1", + Type: "webhook", + Source: "webhook-gateway", + DataContentType: "application/json", + SpecVersion: cloudevents.VersionV1, + Subject: "example-1", + }, + Data: []byte(`{"message": "world"}`), + }, + } + + defaultValue := "hello" + + trigger.Trigger.Template.Pulsar.Payload = []v1alpha1.TriggerParameter{ + { + Src: &v1alpha1.TriggerParameterSource{ + DependencyName: "fake-dependency", + DataKey: "message", + Value: &defaultValue, + }, + Dest: "message", + }, + } + + producer.ExpectInputAndSucceed() + + result, err := trigger.Execute(context.TODO(), testEvents, trigger.Trigger.Template.Pulsar) + assert.Nil(t, err) + assert.Nil(t, result) +} diff --git a/sensors/triggers/slack/slack.go b/sensors/triggers/slack/slack.go index 4acd7817ef..8af532d7c0 100644 --- a/sensors/triggers/slack/slack.go +++ b/sensors/triggers/slack/slack.go @@ -18,12 +18,11 @@ package slack import ( "context" "encoding/json" + "fmt" "net/http" "strings" - "time" - "github.com/pkg/errors" - "github.com/slack-go/slack" + notifications "github.com/argoproj/notifications-engine/pkg/services" "go.uber.org/zap" "github.com/argoproj/argo-events/common" @@ -42,15 +41,30 @@ type SlackTrigger struct { Logger *zap.SugaredLogger // http client to invoke function. httpClient *http.Client + // slackSvc refers to the Slack notification service. 
+ slackSvc notifications.NotificationService } // NewSlackTrigger returns a new Slack trigger context func NewSlackTrigger(sensor *v1alpha1.Sensor, trigger *v1alpha1.Trigger, logger *zap.SugaredLogger, httpClient *http.Client) (*SlackTrigger, error) { + slackTrigger := trigger.Template.Slack + slackToken, err := common.GetSecretFromVolume(slackTrigger.SlackToken) + if err != nil { + return nil, fmt.Errorf("failed to retrieve the slack token, %w", err) + } + + slackSvc := notifications.NewSlackService(notifications.SlackOptions{ + Token: slackToken, + Username: slackTrigger.Sender.Username, + Icon: slackTrigger.Sender.Icon, + }) + return &SlackTrigger{ Sensor: sensor, Trigger: trigger, Logger: logger.With(logging.LabelTriggerType, apicommon.SlackTrigger), httpClient: httpClient, + slackSvc: slackSvc, }, nil } @@ -66,7 +80,7 @@ func (t *SlackTrigger) FetchResource(ctx context.Context) (interface{}, error) { func (t *SlackTrigger) ApplyResourceParameters(events map[string]*v1alpha1.Event, resource interface{}) (interface{}, error) { resourceBytes, err := json.Marshal(resource) if err != nil { - return nil, errors.Wrap(err, "failed to marshal the Slack trigger resource") + return nil, fmt.Errorf("failed to marshal the Slack trigger resource, %w", err) } parameters := t.Trigger.Template.Slack.Parameters @@ -78,7 +92,7 @@ func (t *SlackTrigger) ApplyResourceParameters(events map[string]*v1alpha1.Event var st *v1alpha1.SlackTrigger if err := json.Unmarshal(updatedResourceBytes, &st); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal the updated Slack trigger resource after applying resource parameters") + return nil, fmt.Errorf("failed to unmarshal the updated Slack trigger resource after applying resource parameters, %w", err) } return st, nil @@ -92,88 +106,48 @@ func (t *SlackTrigger) Execute(ctx context.Context, events map[string]*v1alpha1. 
t.Logger.Info("executing SlackTrigger") _, ok := resource.(*v1alpha1.SlackTrigger) if !ok { - return nil, errors.New("failed to marshal the Slack trigger resource") + return nil, fmt.Errorf("failed to marshal the Slack trigger resource") } - slacktrigger := t.Trigger.Template.Slack + slackTrigger := t.Trigger.Template.Slack - channel := slacktrigger.Channel + channel := slackTrigger.Channel if channel == "" { - return nil, errors.New("no slack channel provided") + return nil, fmt.Errorf("no slack channel provided") } channel = strings.TrimPrefix(channel, "#") - message := slacktrigger.Message - if message == "" { - return nil, errors.New("no slack message to post") + message := slackTrigger.Message + attachments := slackTrigger.Attachments + blocks := slackTrigger.Blocks + if message == "" && attachments == "" && blocks == "" { + return nil, fmt.Errorf("no text to post: At least one of message/attachments/blocks should be provided") } - slackToken, err := common.GetSecretFromVolume(slacktrigger.SlackToken) + t.Logger.Infow("posting to channel...", zap.Any("channelName", channel)) + + notification := notifications.Notification{ + Message: message, + Slack: ¬ifications.SlackNotification{ + GroupingKey: slackTrigger.Thread.MessageAggregationKey, + NotifyBroadcast: slackTrigger.Thread.BroadcastMessageToChannel, + Blocks: blocks, + Attachments: attachments, + }, + } + destination := notifications.Destination{ + Service: "slack", + Recipient: channel, + } + err := t.slackSvc.Send(notification, destination) if err != nil { - return nil, errors.Wrap(err, "failed to retrieve the slack token") + t.Logger.Errorw("unable to post to channel", zap.Any("channelName", channel), zap.Error(err)) + return nil, fmt.Errorf("failed to post to channel %s, %w", channel, err) } - api := slack.New(slackToken, slack.OptionDebug(false)) - - t.Logger.Infow("posting to channel...", zap.Any("channelName", channel)) - for { - channelID, timestamp, err := api.PostMessage(channel, slack.MsgOptionText(message, false)) - if err != nil { - if err.Error() == "not_in_channel" { - isPrivateChannel := false - params := &slack.GetConversationsParameters{ - Limit: 200, - Types: []string{"public_channel", "private_channel"}, - ExcludeArchived: "true", - } - - for { - channels, nextCursor, err := api.GetConversations(params) - if err != nil { - switch e := err.(type) { - case *slack.RateLimitedError: - <-time.After(e.RetryAfter) - continue - default: - t.Logger.Errorw("unable to list channels", zap.Error(err)) - return nil, errors.Wrapf(err, "failed to list channels") - } - } - for _, c := range channels { - if c.Name == channel { - channelID = c.ID - isPrivateChannel = c.IsPrivate - break - } - } - if nextCursor == "" || channelID != "" { - break - } - params.Cursor = nextCursor - } - if channelID == "" { - return nil, errors.Errorf("failed to get channelID of %s", channel) - } - if isPrivateChannel { - return nil, errors.Errorf("cannot join private channel %s", channel) - } - - c, _, _, err := api.JoinConversation(channelID) - if err != nil { - t.Logger.Errorw("unable to join channel...", zap.Any("channelName", channel), zap.Any("channelID", channelID), zap.Error(err)) - return nil, errors.Wrapf(err, "failed to join channel %s", channel) - } - t.Logger.Debugw("successfully joined channel", zap.Any("channel", c)) - continue - } else { - t.Logger.Errorw("unable to post to channel...", zap.Any("channelName", channel), zap.Error(err)) - return nil, errors.Wrapf(err, "failed to post to channel %s", channel) - } - } - 
t.Logger.Infow("message successfully sent to channelID with timestamp", zap.Any("message", message), zap.Any("channelID", channelID), zap.Any("timestamp", timestamp)) - t.Logger.Info("finished executing SlackTrigger") - return nil, nil - } + t.Logger.Infow("message successfully sent to channel", zap.Any("message", message), zap.Any("channelName", channel)) + t.Logger.Info("finished executing SlackTrigger") + return nil, nil } // No Policies for SlackTrigger diff --git a/sensors/triggers/standard-k8s/standar-k8s.go b/sensors/triggers/standard-k8s/standard-k8s.go similarity index 77% rename from sensors/triggers/standard-k8s/standar-k8s.go rename to sensors/triggers/standard-k8s/standard-k8s.go index fc51e7b33e..b76bfbad65 100644 --- a/sensors/triggers/standard-k8s/standar-k8s.go +++ b/sensors/triggers/standard-k8s/standard-k8s.go @@ -1,9 +1,12 @@ /* Copyright 2020 BlackRock, Inc. + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -20,13 +23,11 @@ import ( "time" "github.com/imdario/mergo" - "github.com/pkg/errors" "go.uber.org/zap" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" k8stypes "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/dynamic" @@ -35,12 +36,16 @@ import ( "github.com/argoproj/argo-events/common/logging" apicommon "github.com/argoproj/argo-events/pkg/apis/common" "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" - "github.com/argoproj/argo-events/sensors/artifacts" "github.com/argoproj/argo-events/sensors/common" "github.com/argoproj/argo-events/sensors/policy" "github.com/argoproj/argo-events/sensors/triggers" ) +var clusterResources = map[string]bool{ + "namespaces": true, + "nodes": true, +} + // StandardK8STrigger implements Trigger interface for standard Kubernetes resources type StandardK8sTrigger struct { // K8sClient is kubernetes client @@ -76,40 +81,25 @@ func (k8sTrigger *StandardK8sTrigger) GetTriggerType() apicommon.TriggerType { // FetchResource fetches the trigger resource from external source func (k8sTrigger *StandardK8sTrigger) FetchResource(ctx context.Context) (interface{}, error) { trigger := k8sTrigger.Trigger - if trigger.Template.K8s.Source == nil { - return nil, errors.Errorf("trigger source for k8s is empty") - } - creds, err := artifacts.GetCredentials(trigger.Template.K8s.Source) - if err != nil { - return nil, err - } - reader, err := artifacts.GetArtifactReader(trigger.Template.K8s.Source, creds) - if err != nil { - return nil, err - } - var rObj runtime.Object - // uObj will either hold the resource definition stored in the trigger or just - // a stub to provide enough information to fetch the object from K8s cluster - uObj, err := artifacts.FetchArtifact(reader) + uObj, err := triggers.FetchKubernetesResource(trigger.Template.K8s.Source) if err != nil { return nil, err } - k8sTrigger.namespableDynamicClient = k8sTrigger.DynamicClient.Resource(schema.GroupVersionResource{ - Group: trigger.Template.K8s.GroupVersionResource.Group, - Version: 
trigger.Template.K8s.GroupVersionResource.Version, - Resource: trigger.Template.K8s.GroupVersionResource.Resource, - }) + gvr := triggers.GetGroupVersionResource(uObj) + k8sTrigger.namespableDynamicClient = k8sTrigger.DynamicClient.Resource(gvr) if trigger.Template.K8s.LiveObject && trigger.Template.K8s.Operation == v1alpha1.Update { objName := uObj.GetName() if objName == "" { return nil, fmt.Errorf("resource name must be specified for fetching live object") } + objNamespace := uObj.GetNamespace() - if objNamespace == "" { + _, isClusterResource := clusterResources[gvr.Resource] + if objNamespace == "" && !isClusterResource { return nil, fmt.Errorf("resource namespace must be specified for fetching live object") } rObj, err = k8sTrigger.namespableDynamicClient.Namespace(objNamespace).Get(ctx, objName, metav1.GetOptions{}) @@ -126,7 +116,7 @@ func (k8sTrigger *StandardK8sTrigger) FetchResource(ctx context.Context) (interf func (k8sTrigger *StandardK8sTrigger) ApplyResourceParameters(events map[string]*v1alpha1.Event, resource interface{}) (interface{}, error) { obj, ok := resource.(*unstructured.Unstructured) if !ok { - return nil, errors.New("failed to interpret the trigger resource") + return nil, fmt.Errorf("failed to interpret the trigger resource") } if err := triggers.ApplyResourceParameters(events, k8sTrigger.Trigger.Template.K8s.Parameters, obj); err != nil { return nil, err @@ -140,13 +130,17 @@ func (k8sTrigger *StandardK8sTrigger) Execute(ctx context.Context, events map[st obj, ok := resource.(*unstructured.Unstructured) if !ok { - return nil, errors.New("failed to interpret the trigger resource") + return nil, fmt.Errorf("failed to interpret the trigger resource") } - namespace := obj.GetNamespace() - // Defaults to sensor's namespace - if namespace == "" { - namespace = k8sTrigger.Sensor.Namespace + gvr := triggers.GetGroupVersionResource(obj) + namespace := "" + if _, isClusterResource := clusterResources[gvr.Resource]; !isClusterResource { + namespace = obj.GetNamespace() + // Defaults to sensor's namespace + if namespace == "" { + namespace = k8sTrigger.Sensor.Namespace + } } obj.SetNamespace(namespace) @@ -157,11 +151,7 @@ func (k8sTrigger *StandardK8sTrigger) Execute(ctx context.Context, events map[st // We might have a client from FetchResource() already, or we might not have one yet. 
if k8sTrigger.namespableDynamicClient == nil { - k8sTrigger.namespableDynamicClient = k8sTrigger.DynamicClient.Resource(schema.GroupVersionResource{ - Group: trigger.Template.K8s.GroupVersionResource.Group, - Version: trigger.Template.K8s.GroupVersionResource.Version, - Resource: trigger.Template.K8s.GroupVersionResource.Resource, - }) + k8sTrigger.namespableDynamicClient = k8sTrigger.DynamicClient.Resource(gvr) } switch op { @@ -174,7 +164,7 @@ func (k8sTrigger *StandardK8sTrigger) Execute(ctx context.Context, events map[st } labels["events.argoproj.io/trigger"] = trigger.Template.Name labels["events.argoproj.io/action-timestamp"] = strconv.Itoa(int(time.Now().UnixNano() / int64(time.Millisecond))) - common.ApplySensorUniquenessLabels(labels, k8sTrigger.Sensor) + common.ApplySensorLabels(labels, k8sTrigger.Sensor) if obj.GetKind() == "Workflow" { err := common.ApplyEventLabels(labels, events) @@ -194,11 +184,11 @@ func (k8sTrigger *StandardK8sTrigger) Execute(ctx context.Context, events map[st k8sTrigger.Logger.Info("object not found, creating the object...") return k8sTrigger.namespableDynamicClient.Namespace(namespace).Create(ctx, obj, metav1.CreateOptions{}) } else if err != nil { - return nil, errors.Errorf("failed to retrieve existing object. err: %+v\n", err) + return nil, fmt.Errorf("failed to retrieve existing object. err: %w", err) } if err := mergo.Merge(oldObj, obj, mergo.WithOverride); err != nil { - return nil, errors.Errorf("failed to update the object. err: %+v\n", err) + return nil, fmt.Errorf("failed to update the object. err: %w", err) } return k8sTrigger.namespableDynamicClient.Namespace(namespace).Update(ctx, oldObj, metav1.UpdateOptions{}) @@ -211,7 +201,7 @@ func (k8sTrigger *StandardK8sTrigger) Execute(ctx context.Context, events map[st k8sTrigger.Logger.Info("object not found, creating the object...") return k8sTrigger.namespableDynamicClient.Namespace(namespace).Create(ctx, obj, metav1.CreateOptions{}) } else if err != nil { - return nil, errors.Errorf("failed to retrieve existing object. err: %+v\n", err) + return nil, fmt.Errorf("failed to retrieve existing object. err: %w", err) } if k8sTrigger.Trigger.Template.K8s.PatchStrategy == "" { @@ -220,7 +210,7 @@ func (k8sTrigger *StandardK8sTrigger) Execute(ctx context.Context, events map[st body, err := obj.MarshalJSON() if err != nil { - return nil, errors.Errorf("failed to marshal object into JSON schema. err: %+v\n", err) + return nil, fmt.Errorf("failed to marshal object into JSON schema. err: %w", err) } return k8sTrigger.namespableDynamicClient.Namespace(namespace).Patch(ctx, obj.GetName(), k8sTrigger.Trigger.Template.K8s.PatchStrategy, body, metav1.PatchOptions{}) @@ -233,17 +223,17 @@ func (k8sTrigger *StandardK8sTrigger) Execute(ctx context.Context, events map[st k8sTrigger.Logger.Info("object not found, nothing to delete...") return nil, nil } else if err != nil { - return nil, errors.Errorf("failed to retrieve existing object. err: %+v\n", err) + return nil, fmt.Errorf("failed to retrieve existing object. err: %w", err) } err = k8sTrigger.namespableDynamicClient.Namespace(namespace).Delete(ctx, obj.GetName(), metav1.DeleteOptions{}) if err != nil { - return nil, errors.Errorf("failed to delete object. err: %+v\n", err) + return nil, fmt.Errorf("failed to delete object. 
err: %w", err) } return nil, nil default: - return nil, errors.Errorf("unknown operation type %s", string(op)) + return nil, fmt.Errorf("unknown operation type %s", string(op)) } } @@ -257,7 +247,7 @@ func (k8sTrigger *StandardK8sTrigger) ApplyPolicy(ctx context.Context, resource obj, ok := resource.(*unstructured.Unstructured) if !ok { - return errors.New("failed to interpret the trigger resource") + return fmt.Errorf("failed to interpret the trigger resource") } p := policy.NewResourceLabels(trigger, k8sTrigger.namespableDynamicClient, obj) @@ -267,16 +257,14 @@ func (k8sTrigger *StandardK8sTrigger) ApplyPolicy(ctx context.Context, resource err := p.ApplyPolicy(ctx) if err != nil { - switch err { - case wait.ErrWaitTimeout: + if wait.Interrupted(err) { if trigger.Policy.K8s.ErrorOnBackoffTimeout { - return errors.Errorf("failed to determine status of the triggered resource. setting trigger state as failed") + return fmt.Errorf("failed to determine status of the triggered resource. setting trigger state as failed") } return nil - default: + } else { return err } } - return nil } diff --git a/sensors/triggers/standard-k8s/standar-k8s_test.go b/sensors/triggers/standard-k8s/standard-k8s_test.go similarity index 97% rename from sensors/triggers/standard-k8s/standar-k8s_test.go rename to sensors/triggers/standard-k8s/standard-k8s_test.go index 6d52208b1e..b6cd2cad14 100644 --- a/sensors/triggers/standard-k8s/standar-k8s_test.go +++ b/sensors/triggers/standard-k8s/standard-k8s_test.go @@ -48,13 +48,7 @@ var sensorObj = &v1alpha1.Sensor{ { Template: &v1alpha1.TriggerTemplate{ Name: "fake-trigger", - K8s: &v1alpha1.StandardK8STrigger{ - GroupVersionResource: metav1.GroupVersionResource{ - Group: "apps", - Version: "v1", - Resource: "deployments", - }, - }, + K8s: &v1alpha1.StandardK8STrigger{}, }, }, }, diff --git a/sensors/triggers/triggers.go b/sensors/triggers/triggers.go new file mode 100644 index 0000000000..8f617eed1c --- /dev/null +++ b/sensors/triggers/triggers.go @@ -0,0 +1,42 @@ +/* +Copyright 2020 BlackRock, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package triggers + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/gengo/namer" + "k8s.io/gengo/types" +) + +func GetGroupVersionResource(obj *unstructured.Unstructured) schema.GroupVersionResource { + gvk := obj.GroupVersionKind() + pluralExceptions := map[string]string{ + "EventBus": "eventbus", + } + resource := namer.NewAllLowercasePluralNamer(pluralExceptions).Name(&types.Type{ + Name: types.Name{ + Name: gvk.Kind, + }, + }) + + return schema.GroupVersionResource{ + Group: gvk.Group, + Version: gvk.Version, + Resource: resource, + } +} diff --git a/sensors/triggers/triggers_test.go b/sensors/triggers/triggers_test.go new file mode 100644 index 0000000000..dd35fe86c8 --- /dev/null +++ b/sensors/triggers/triggers_test.go @@ -0,0 +1,51 @@ +/* +Copyright 2020 BlackRock, Inc. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package triggers + +import ( + "testing" + + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/stretchr/testify/assert" +) + +func TestGetGroupVersionResource(t *testing.T) { + deployment := newUnstructured("apps/v1", "Deployment", "fake", "test-deployment") + expectedDeploymentGVR := schema.GroupVersionResource{ + Group: "apps", + Version: "v1", + Resource: "deployments", + } + assert.Equal(t, expectedDeploymentGVR, GetGroupVersionResource(deployment)) + + ingress := newUnstructured("networking.k8s.io/v1", "Ingress", "fake", "test-ingress") + expectedIngressGVR := schema.GroupVersionResource{ + Group: "networking.k8s.io", + Version: "v1", + Resource: "ingresses", + } + assert.Equal(t, expectedIngressGVR, GetGroupVersionResource(ingress)) + + eventbus := newUnstructured("argoproj.io/v1alpha1", "EventBus", "fake", "test-eb") + expectedEventBusGVR := schema.GroupVersionResource{ + Group: "argoproj.io", + Version: "v1alpha1", + Resource: "eventbus", + } + assert.Equal(t, expectedEventBusGVR, GetGroupVersionResource(eventbus)) +} diff --git a/test/e2e/fixtures/e2e_suite.go b/test/e2e/fixtures/e2e_suite.go index 379f7803a8..ea5f78f410 100644 --- a/test/e2e/fixtures/e2e_suite.go +++ b/test/e2e/fixtures/e2e_suite.go @@ -3,6 +3,7 @@ package fixtures import ( "context" "os" + "strings" "time" "github.com/stretchr/testify/suite" @@ -35,7 +36,7 @@ const ( var ( background = metav1.DeletePropagationBackground - e2eEventBus = `apiVersion: argoproj.io/v1alpha1 + E2EEventBusSTAN = `apiVersion: argoproj.io/v1alpha1 kind: EventBus metadata: name: default @@ -43,6 +44,22 @@ spec: nats: native: auth: token` + + E2EEventBusJetstream = `apiVersion: argoproj.io/v1alpha1 +kind: EventBus +metadata: + name: default +spec: + jetstream: + version: latest` + + E2EEventBusKafka = `apiVersion: argoproj.io/v1alpha1 +kind: EventBus +metadata: + name: default +spec: + kafka: + url: kafka:9092` ) type E2ESuite struct { @@ -56,7 +73,15 @@ type E2ESuite struct { func (s *E2ESuite) SetupSuite() { var err error - kubeConfig, _ := os.LookupEnv(common.EnvVarKubeConfig) + + kubeConfig, found := os.LookupEnv(common.EnvVarKubeConfig) + if !found { + home, _ := os.UserHomeDir() + kubeConfig = home + "/.kube/config" + if _, err := os.Stat(kubeConfig); err != nil && os.IsNotExist(err) { + kubeConfig = "" + } + } s.restConfig, err = common.GetClientConfig(kubeConfig) s.CheckError(err) s.kubeClient, err = kubernetes.NewForConfig(s.restConfig) @@ -73,16 +98,18 @@ func (s *E2ESuite) SetupSuite() { } s.deleteResources(resources) - s.Given().EventBus(e2eEventBus). + s.Given().EventBus(GetBusDriverSpec()). When(). CreateEventBus(). WaitForEventBusReady() s.T().Log("EventBus is ready") + + time.Sleep(10 * time.Second) // give it a little extra time to be fully ready // todo: any issues with this? Otherwise, I need to increase the allowance in the backoff } func (s *E2ESuite) TearDownSuite() { s.DeleteResources() - s.Given().EventBus(e2eEventBus). + s.Given().EventBus(GetBusDriverSpec()). When(). 
DeleteEventBus().
 		Wait(3 * time.Second).
@@ -146,3 +173,13 @@ func (s *E2ESuite) Given() *Given {
 		kubeClient: s.kubeClient,
 	}
 }
+
+func GetBusDriverSpec() string {
+	x := strings.ToUpper(os.Getenv("EventBusDriver"))
+	if x == "JETSTREAM" {
+		return E2EEventBusJetstream
+	} else if x == "KAFKA" {
+		return E2EEventBusKafka
+	}
+	return E2EEventBusSTAN
+}
diff --git a/test/e2e/fixtures/given.go b/test/e2e/fixtures/given.go
index 55a7638b17..16200f3add 100644
--- a/test/e2e/fixtures/given.go
+++ b/test/e2e/fixtures/given.go
@@ -1,7 +1,7 @@
 package fixtures
 
 import (
-	"io/ioutil"
+	"os"
 	"strings"
 	"testing"
@@ -91,7 +91,7 @@ func (g *Given) readResource(text string, v metav1.Object) {
 	if strings.HasPrefix(text, "@") {
 		file = strings.TrimPrefix(text, "@")
 	} else {
-		f, err := ioutil.TempFile("", "argo-events-e2e")
+		f, err := os.CreateTemp("", "argo-events-e2e")
 		if err != nil {
 			g.t.Fatal(err)
 		}
@@ -106,7 +106,7 @@ func (g *Given) readResource(text string, v metav1.Object) {
 		file = f.Name()
 	}
 
-	f, err := ioutil.ReadFile(file)
+	f, err := os.ReadFile(file)
 	if err != nil {
 		g.t.Fatal(err)
 	}
diff --git a/test/e2e/fixtures/then.go b/test/e2e/fixtures/then.go
index fafb595b9c..8a03a6fb2e 100644
--- a/test/e2e/fixtures/then.go
+++ b/test/e2e/fixtures/then.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"testing"
+	"time"
 
 	apierr "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -42,26 +43,39 @@ func (t *Then) ExpectEventBusDeleted() *Then {
 	return t
 }
 
-func (t *Then) ExpectEventSourcePodLogContains(regex string) *Then {
+func (t *Then) ExpectNoSensorPodFound() *Then {
 	ctx := context.Background()
-	contains, err := testutil.EventSourcePodLogContains(ctx, t.kubeClient, Namespace, t.eventSource.Name, regex, defaultTimeout)
+	labelSelector := fmt.Sprintf("controller=sensor-controller,sensor-name=%s", t.sensor.Name)
+	if err := testutil.WaitForNoPodFound(ctx, t.kubeClient, Namespace, labelSelector, 20*time.Second); err != nil {
+		t.t.Fatalf("expected no sensor pod found: %v", err)
+	}
+	return t
+}
+
+// look for regex match in EventSource pod's log
+// if countOpt != nil, look for specific count of regex match; else look for at least one instance
+func (t *Then) ExpectEventSourcePodLogContains(regex string, options ...testutil.PodLogCheckOption) *Then {
+	ctx := context.Background()
+	contains, err := testutil.EventSourcePodLogContains(ctx, t.kubeClient, Namespace, t.eventSource.Name, regex, options...)
 	if err != nil {
 		t.t.Fatalf("expected event source pod logs: %v", err)
 	}
 	if !contains {
-		t.t.Fatalf("expected event source pod log contains %s", regex)
+		t.t.Fatalf("expected event source pod log contains '%s'", regex)
 	}
 	return t
 }
 
-func (t *Then) ExpectSensorPodLogContains(regex string) *Then {
+// look for regex match in Sensor pod's log
+// if countOpt != nil, look for specific count of regex match; else look for at least one instance
+func (t *Then) ExpectSensorPodLogContains(regex string, options ...testutil.PodLogCheckOption) *Then {
 	ctx := context.Background()
-	contains, err := testutil.SensorPodLogContains(ctx, t.kubeClient, Namespace, t.sensor.Name, regex, defaultTimeout)
+	contains, err := testutil.SensorPodLogContains(ctx, t.kubeClient, Namespace, t.sensor.Name, regex, options...)
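The log assertions above now take variadic testutil.PodLogCheckOption values instead of a fixed timeout; per the comments, a count option can demand an exact number of matches. A hypothetical call site (the option constructor names below are illustrative assumptions, not taken from this diff):

	// assert the trigger fired exactly twice within one minute
	then.ExpectSensorPodLogContains(
		LogTriggerActionSuccessful("log-trigger"),
		testutil.PodLogCheckOptionWithCount(2),             // assumed constructor
		testutil.PodLogCheckOptionWithTimeout(time.Minute), // assumed constructor
	)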
if err != nil { t.t.Fatalf("expected sensor pod logs: %v", err) } if !contains { - t.t.Fatalf("expected sensor pod log contains %s", regex) + t.t.Fatalf("expected sensor pod log contains '%s'", regex) } return t } diff --git a/test/e2e/fixtures/when.go b/test/e2e/fixtures/when.go index 50c5d67b16..9be842f216 100644 --- a/test/e2e/fixtures/when.go +++ b/test/e2e/fixtures/when.go @@ -76,6 +76,20 @@ func (w *When) CreateEventSource() *When { return w } +func (w *When) DeleteEventSource() *When { + w.t.Helper() + if w.eventSource == nil { + w.t.Fatal("No event source to delete") + } + w.t.Log("Deleting event source", w.eventSource.Name) + ctx := context.Background() + err := w.eventSourceClient.Delete(ctx, w.eventSource.Name, metav1.DeleteOptions{}) + if err != nil { + w.t.Fatal(err) + } + return w +} + func (w *When) CreateSensor() *When { w.t.Helper() if w.sensor == nil { @@ -92,6 +106,20 @@ func (w *When) CreateSensor() *When { return w } +func (w *When) DeleteSensor() *When { + w.t.Helper() + if w.sensor == nil { + w.t.Fatal("No sensor to delete") + } + w.t.Log("Deleting sensor", w.sensor.Name) + ctx := context.Background() + err := w.sensorClient.Delete(ctx, w.sensor.Name, metav1.DeleteOptions{}) + if err != nil { + w.t.Fatal(err) + } + return w +} + func (w *When) Wait(timeout time.Duration) *When { w.t.Helper() w.t.Log("Waiting for", timeout.String()) @@ -125,8 +153,10 @@ func (w *When) WaitForEventBusReady() *When { if err := testutil.WaitForEventBusReady(ctx, w.eventBusClient, w.eventBus.Name, defaultTimeout); err != nil { w.t.Fatal(err) } - if err := testutil.WaitForEventBusStatefulSetReady(ctx, w.kubeClient, Namespace, w.eventBus.Name, defaultTimeout); err != nil { - w.t.Fatal(err) + if w.eventBus.Spec.Kafka == nil { // not needed for kafka (exotic only) + if err := testutil.WaitForEventBusStatefulSetReady(ctx, w.kubeClient, Namespace, w.eventBus.Name, 2*time.Minute); err != nil { + w.t.Fatal(err) + } } return w } diff --git a/test/e2e/functional_test.go b/test/e2e/functional_test.go index c85c12d83a..04ee82b28d 100644 --- a/test/e2e/functional_test.go +++ b/test/e2e/functional_test.go @@ -1,9 +1,10 @@ -// +build functional +//go:build functional package e2e import ( "crypto/tls" + "fmt" "net/http" "testing" "time" @@ -18,13 +19,16 @@ type FunctionalSuite struct { } const ( - LogEventSourceStarted = "Eventing server started." - LogSensorStarted = "Sensor started." - LogPublishEventSuccessful = "succeeded to publish an event" - LogTriggerActionSuccessful = "successfully processed the trigger" - LogTriggerActionFailed = "failed to execute a trigger" + LogEventSourceStarted = "Eventing server started." + LogSensorStarted = "Sensor started." + LogPublishEventSuccessful = "Succeeded to publish an event" + LogTriggerActionFailed = "Failed to execute a trigger" ) +func LogTriggerActionSuccessful(triggerName string) string { + return fmt.Sprintf("Successfully processed trigger '%s'", triggerName) +} + func (s *FunctionalSuite) e(baseURL string) *httpexpect.Expect { return httpexpect. WithConfig(httpexpect.Config{ @@ -43,118 +47,139 @@ func (s *FunctionalSuite) e(baseURL string) *httpexpect.Expect { Builder(func(req *httpexpect.Request) {}) } -func (s *FunctionalSuite) TestCreateCalendarEventSource() { - s.Given().EventSource("@testdata/es-calendar.yaml"). - When(). - CreateEventSource(). - WaitForEventSourceReady(). - Then(). - ExpectEventSourcePodLogContains(LogPublishEventSuccessful) - - s.Given().Sensor("@testdata/sensor-log.yaml"). - When(). - CreateSensor(). - WaitForSensorReady(). 
- Then(). - ExpectSensorPodLogContains(LogTriggerActionSuccessful) -} - -func (s *FunctionalSuite) TestCreateCalendarEventSourceWithHA() { - s.Given().EventSource("@testdata/es-calendar-ha.yaml"). - When(). - CreateEventSource(). - WaitForEventSourceReady(). - Wait(3 * time.Second). - Then(). - ExpectEventSourcePodLogContains(LogPublishEventSuccessful) - - s.Given().Sensor("@testdata/sensor-log-ha.yaml"). - When(). - CreateSensor(). - WaitForSensorReady(). - Wait(3 * time.Second). - Then(). - ExpectSensorPodLogContains(LogTriggerActionSuccessful) -} +// func (s *FunctionalSuite) TestCreateCalendarEventSource() { +// t1 := s.Given().EventSource("@testdata/es-calendar.yaml"). +// When(). +// CreateEventSource(). +// WaitForEventSourceReady(). +// Then(). +// ExpectEventSourcePodLogContains(LogPublishEventSuccessful) + +// defer t1.When().DeleteEventSource() + +// t2 := s.Given().Sensor("@testdata/sensor-log.yaml"). +// When(). +// CreateSensor(). +// WaitForSensorReady(). +// Then(). +// ExpectSensorPodLogContains(LogTriggerActionSuccessful("log-trigger")) + +// defer t2.When().DeleteSensor() +// } + +// func (s *FunctionalSuite) TestCreateCalendarEventSourceWithHA() { +// for _, test := range []struct { +// es, s string +// }{ +// {"@testdata/es-calendar-ha.yaml", "@testdata/sensor-log-ha.yaml"}, +// {"@testdata/es-calendar-ha-k8s.yaml", "@testdata/sensor-log-ha-k8s.yaml"}, +// } { +// t1 := s.Given().EventSource(test.es). +// When(). +// CreateEventSource(). +// WaitForEventSourceReady(). +// Wait(3 * time.Second). +// Then(). +// ExpectEventSourcePodLogContains(LogPublishEventSuccessful) + +// defer t1.When().DeleteEventSource() + +// t2 := s.Given().Sensor(test.s). +// When(). +// CreateSensor(). +// WaitForSensorReady(). +// Wait(3 * time.Second). +// Then(). +// ExpectSensorPodLogContains(LogTriggerActionSuccessful("log-trigger")) +// defer t2.When().DeleteSensor() +// } +// } + +// func (s *FunctionalSuite) TestMetricsWithCalendar() { +// w1 := s.Given().EventSource("@testdata/es-calendar-metrics.yaml"). +// When(). +// CreateEventSource(). +// WaitForEventSourceReady() + +// defer w1.DeleteEventSource() + +// w1.Then(). +// ExpectEventSourcePodLogContains(LogEventSourceStarted) + +// defer w1.Then().EventSourcePodPortForward(17777, 7777).TerminateAllPodPortForwards() + +// w1.Then().ExpectEventSourcePodLogContains(LogPublishEventSuccessful) + +// // EventSource POD metrics +// s.e("http://localhost:17777").GET("/metrics"). +// Expect(). +// Status(200). +// Body(). +// Contains("argo_events_event_service_running_total"). +// Contains("argo_events_events_sent_total"). +// Contains("argo_events_event_processing_duration_milliseconds") + +// w2 := s.Given().Sensor("@testdata/sensor-log-metrics.yaml"). +// When(). +// CreateSensor(). +// WaitForSensorReady() +// defer w2.DeleteSensor() + +// w2.Then(). +// ExpectSensorPodLogContains(LogSensorStarted) +// defer w2.Then().SensorPodPortForward(17778, 7777).TerminateAllPodPortForwards() + +// w2.Then().ExpectSensorPodLogContains(LogTriggerActionSuccessful("log-trigger")) + +// // Sensor POD metrics +// s.e("http://localhost:17778").GET("/metrics"). +// Expect(). +// Status(200). +// Body(). +// Contains("argo_events_action_triggered_total"). +// Contains("argo_events_action_duration_milliseconds") +// } -func (s *FunctionalSuite) TestMetricsWithCalendar() { - t1 := s.Given().EventSource("@testdata/es-calendar-metrics.yaml"). 
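The tests in this file pass variadic `testutil.PodLogCheckOption` values (for example `util.PodLogCheckOptionWithCount(1)`) to the pod-log assertions. Below is a minimal sketch of the functional-options shape this implies; the actual fields and defaults live in `test/util` and may differ, and `PodLogCheckOptionWithTimeout` is a hypothetical name used only for illustration.

```go
package util

import "time"

// podLogCheckOptions holds the knobs the *PodLogContains helpers consult
// (illustrative sketch, not the repository's actual implementation).
type podLogCheckOptions struct {
	timeout time.Duration // how long to keep scanning the pod log
	count   int           // -1 means "at least one match is enough"
}

// PodLogCheckOption mutates the default check options.
type PodLogCheckOption func(*podLogCheckOptions)

// PodLogCheckOptionWithCount requires an exact number of regex matches,
// so a count of 0 can assert that a trigger did NOT fire.
func PodLogCheckOptionWithCount(c int) PodLogCheckOption {
	return func(o *podLogCheckOptions) { o.count = c }
}

// PodLogCheckOptionWithTimeout (hypothetical) would override the default wait.
func PodLogCheckOptionWithTimeout(d time.Duration) PodLogCheckOption {
	return func(o *podLogCheckOptions) { o.timeout = d }
}
```

This shape explains why calling `ExpectSensorPodLogContains(regex)` with no options means "at least one match", while passing `PodLogCheckOptionWithCount(0)` asserts the absence of any match.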
+func (s *FunctionalSuite) TestMetricsWithWebhook() { + w1 := s.Given().EventSource("@testdata/es-test-metrics-webhook.yaml"). When(). CreateEventSource(). - WaitForEventSourceReady(). - Then(). - ExpectEventSourcePodLogContains(LogEventSourceStarted). - EventSourcePodPortForward(7777, 7777) + WaitForEventSourceReady() - defer t1.TerminateAllPodPortForwards() + defer w1.DeleteEventSource() - t1.ExpectEventSourcePodLogContains(LogPublishEventSuccessful) + w1.Then().ExpectEventSourcePodLogContains(LogEventSourceStarted) - // EventSource POD metrics - s.e("http://localhost:7777").GET("/metrics"). - Expect(). - Status(200). - Body(). - Contains("argo_events_event_service_running_total"). - Contains("argo_events_events_sent_total"). - Contains("argo_events_event_processing_duration_milliseconds") + defer w1.Then(). + EventSourcePodPortForward(12300, 12000). + EventSourcePodPortForward(7717, 7777).TerminateAllPodPortForwards() - t2 := s.Given().Sensor("@testdata/sensor-log-metrics.yaml"). + w2 := s.Given().Sensor("@testdata/sensor-test-metrics.yaml"). When(). CreateSensor(). - WaitForSensorReady(). - Then(). - ExpectSensorPodLogContains(LogSensorStarted). - SensorPodPortForward(7778, 7777) + WaitForSensorReady() - defer t2.TerminateAllPodPortForwards() + defer w2.DeleteSensor() + w2.Then().ExpectSensorPodLogContains(LogSensorStarted) - t2.ExpectSensorPodLogContains(LogTriggerActionSuccessful) + defer w2.Then(). + SensorPodPortForward(7718, 7777).TerminateAllPodPortForwards() - // Sensor POD metrics - s.e("http://localhost:7778").GET("/metrics"). - Expect(). - Status(200). - Body(). - Contains("argo_events_action_triggered_total"). - Contains("argo_events_action_duration_milliseconds") -} - -func (s *FunctionalSuite) TestMetricsWithWebhook() { - t1 := s.Given().EventSource("@testdata/es-test-metrics-webhook.yaml"). - When(). - CreateEventSource(). - WaitForEventSourceReady(). - Then(). - ExpectEventSourcePodLogContains(LogEventSourceStarted). - EventSourcePodPortForward(12000, 12000). - EventSourcePodPortForward(7777, 7777) - - defer t1.TerminateAllPodPortForwards() - - t2 := s.Given().Sensor("@testdata/sensor-test-metrics.yaml"). - When(). - CreateSensor(). - WaitForSensorReady(). - Then(). - ExpectSensorPodLogContains(LogSensorStarted). - SensorPodPortForward(7778, 7777) + time.Sleep(3 * time.Second) - defer t2.TerminateAllPodPortForwards() - - s.e("http://localhost:12000").POST("/example").WithBytes([]byte("{}")). + s.e("http://localhost:12300").POST("/example").WithBytes([]byte("{}")). Expect(). Status(200) - t1.ExpectEventSourcePodLogContains(LogPublishEventSuccessful) + w1.Then().ExpectEventSourcePodLogContains(LogPublishEventSuccessful) // Post something invalid - s.e("http://localhost:12000").POST("/example").WithBytes([]byte("Invalid JSON")). + s.e("http://localhost:12300").POST("/example").WithBytes([]byte("Invalid JSON")). Expect(). Status(400) // EventSource POD metrics - s.e("http://localhost:7777").GET("/metrics"). + s.e("http://localhost:7717").GET("/metrics"). Expect(). Status(200). Body(). @@ -164,11 +189,11 @@ func (s *FunctionalSuite) TestMetricsWithWebhook() { Contains("argo_events_events_processing_failed_total") // Expect to see 1 success and 1 failure - t2.ExpectSensorPodLogContains(LogTriggerActionSuccessful). + w2.Then().ExpectSensorPodLogContains(LogTriggerActionSuccessful("log-trigger")). ExpectSensorPodLogContains(LogTriggerActionFailed) // Sensor POD metrics - s.e("http://localhost:7778").GET("/metrics"). + s.e("http://localhost:7718").GET("/metrics"). Expect(). 
Status(200). Body(). @@ -177,29 +202,424 @@ func (s *FunctionalSuite) TestMetricsWithWebhook() { Contains("argo_events_action_failed_total") } -func (s *FunctionalSuite) TestResourceEventSource() { - w1 := s.Given().EventSource("@testdata/es-resource.yaml"). - When(). - CreateEventSource(). - WaitForEventSourceReady(). - Exec("kubectl", []string{"-n", fixtures.Namespace, "run", "test-pod", "--image", "hello-world", "-l", fixtures.Label + "=" + fixtures.LabelValue}, fixtures.OutputRegexp(`pod/.* created`)) - - t1 := w1.Then(). - ExpectEventSourcePodLogContains(LogEventSourceStarted) - - t2 := s.Given().Sensor("@testdata/sensor-resource.yaml"). - When(). - CreateSensor(). - WaitForSensorReady(). - Then(). - ExpectSensorPodLogContains(LogSensorStarted) - - w1.Exec("kubectl", []string{"-n", fixtures.Namespace, "delete", "pod", "test-pod"}, fixtures.OutputRegexp(`pod "test-pod" deleted`)) - - t1.ExpectEventSourcePodLogContains(LogPublishEventSuccessful) - - t2.ExpectSensorPodLogContains(LogTriggerActionSuccessful) -} +// func (s *FunctionalSuite) TestResourceEventSource() { +// w1 := s.Given().EventSource("@testdata/es-resource.yaml"). +// When(). +// CreateEventSource(). +// WaitForEventSourceReady(). +// Exec("kubectl", []string{"-n", fixtures.Namespace, "run", "test-pod", "--image", "hello-world", "-l", fixtures.Label + "=" + fixtures.LabelValue}, fixtures.OutputRegexp(`pod/.* created`)) + +// t1 := w1.Then(). +// ExpectEventSourcePodLogContains(LogEventSourceStarted) +// defer t1.When().DeleteEventSource() + +// t2 := s.Given().Sensor("@testdata/sensor-resource.yaml"). +// When(). +// CreateSensor(). +// WaitForSensorReady(). +// Then(). +// ExpectSensorPodLogContains(LogSensorStarted) +// defer t2.When().DeleteSensor() + +// w1.Exec("kubectl", []string{"-n", fixtures.Namespace, "delete", "pod", "test-pod"}, fixtures.OutputRegexp(`pod "test-pod" deleted`)) + +// t1.ExpectEventSourcePodLogContains(LogPublishEventSuccessful) + +// t2.ExpectSensorPodLogContains(LogTriggerActionSuccessful("log-trigger")) +// } + +// func (s *FunctionalSuite) TestMultiDependencyConditions() { + +// w1 := s.Given().EventSource("@testdata/es-multi-dep.yaml"). +// When(). +// CreateEventSource(). +// WaitForEventSourceReady() +// defer w1.DeleteEventSource() + +// w1.Then().ExpectEventSourcePodLogContains(LogEventSourceStarted) + +// defer w1.Then(). +// EventSourcePodPortForward(12011, 12000). +// EventSourcePodPortForward(13011, 13000). +// EventSourcePodPortForward(14011, 14000). +// TerminateAllPodPortForwards() + +// w2 := s.Given().Sensor("@testdata/sensor-multi-dep.yaml"). +// When(). +// CreateSensor(). +// WaitForSensorReady() +// defer w2.DeleteSensor() + +// w2.Then().ExpectSensorPodLogContains(LogSensorStarted, util.PodLogCheckOptionWithCount(1)) + +// time.Sleep(3 * time.Second) + +// // need to verify the conditional logic is working successfully +// // If we trigger test-dep-1 (port 12000) we should see log-trigger-2 but not log-trigger-1 +// s.e("http://localhost:12011").POST("/example1").WithBytes([]byte("{}")). +// Expect(). +// Status(200) + +// w1.Then().ExpectEventSourcePodLogContains(LogPublishEventSuccessful, util.PodLogCheckOptionWithCount(1)) + +// w2.Then().ExpectSensorPodLogContains(LogTriggerActionSuccessful("log-trigger-2"), util.PodLogCheckOptionWithCount(1)). 
+// ExpectSensorPodLogContains(LogTriggerActionSuccessful("log-trigger-1"), util.PodLogCheckOptionWithCount(0)) + +// // Then if we trigger test-dep-2 we should see log-trigger-1 +// s.e("http://localhost:13011").POST("/example2").WithBytes([]byte("{}")). +// Expect(). +// Status(200) + +// w2.Then().ExpectSensorPodLogContains(LogTriggerActionSuccessful("log-trigger-1"), util.PodLogCheckOptionWithCount(1)) + +// // Then we trigger test-dep-2 again and shouldn't see anything +// s.e("http://localhost:13011").POST("/example2").WithBytes([]byte("{}")). +// Expect(). +// Status(200) +// w2.Then().ExpectSensorPodLogContains(LogTriggerActionSuccessful("log-trigger-1"), util.PodLogCheckOptionWithCount(1)) + +// // Finally trigger test-dep-3 and we should see log-trigger-1. +// s.e("http://localhost:14011").POST("/example3").WithBytes([]byte("{}")). +// Expect(). +// Status(200) +// w2.Then().ExpectSensorPodLogContains(LogTriggerActionSuccessful("log-trigger-1"), util.PodLogCheckOptionWithCount(2)) +// } + +// // Start Pod with a multidependency condition +// // send it one dependency +// // verify that if it goes down and comes back up it triggers when sent the other part of the condition +// func (s *FunctionalSuite) TestDurableConsumer() { +// if fixtures.GetBusDriverSpec() == fixtures.E2EEventBusSTAN { +// s.T().SkipNow() // todo: TestDurableConsumer() is skipped for now because it does not reliably pass with the STAN bus +// // (because when Sensor pod restarts it sometimes takes a little while for the STAN bus to resend the message to the durable consumer) +// } + +// w1 := s.Given().EventSource("@testdata/es-durable-consumer.yaml"). +// When(). +// CreateEventSource(). +// WaitForEventSourceReady() +// defer w1.DeleteEventSource() + +// w1.Then().ExpectEventSourcePodLogContains(LogEventSourceStarted) + +// defer w1.Then(). +// EventSourcePodPortForward(12102, 12000). +// EventSourcePodPortForward(13102, 13000).TerminateAllPodPortForwards() + +// w2 := s.Given().Sensor("@testdata/sensor-durable-consumer.yaml"). +// When(). +// CreateSensor(). +// WaitForSensorReady() + +// w2.Then(). +// ExpectSensorPodLogContains(LogSensorStarted, util.PodLogCheckOptionWithCount(1)) + +// // test-dep-1 +// s.e("http://localhost:12102").POST("/example1").WithBytes([]byte("{}")). +// Expect(). +// Status(200) + +// w1.Then().ExpectEventSourcePodLogContains(LogPublishEventSuccessful, util.PodLogCheckOptionWithCount(1)) + +// // delete the Sensor +// w2.DeleteSensor().Then().ExpectNoSensorPodFound() + +// w3 := s.Given().Sensor("@testdata/sensor-durable-consumer.yaml"). +// When(). +// CreateSensor(). +// WaitForSensorReady() +// defer w3.DeleteSensor() + +// w3.Then(). +// ExpectSensorPodLogContains(LogSensorStarted, util.PodLogCheckOptionWithCount(1)) + +// // test-dep-2 +// s.e("http://localhost:13102").POST("/example2").WithBytes([]byte("{}")). +// Expect(). +// Status(200) +// w1.Then().ExpectEventSourcePodLogContains(LogPublishEventSuccessful, util.PodLogCheckOptionWithCount(2)) +// w3.Then().ExpectSensorPodLogContains(LogTriggerActionSuccessful("log-trigger-1"), util.PodLogCheckOptionWithCount(1)) +// } + +// func (s *FunctionalSuite) TestMultipleSensors() { +// // Start two sensors which each use "A && B", but staggered in time such that one receives the partial condition +// // Then send the other part of the condition and verify that only one triggers + +// // Start EventSource +// w1 := s.Given().EventSource("@testdata/es-multi-sensor.yaml"). +// When(). +// CreateEventSource().
+// WaitForEventSourceReady() +// defer w1.DeleteEventSource() + +// w1.Then(). +// ExpectEventSourcePodLogContains(LogEventSourceStarted) + +// defer w1.Then().EventSourcePodPortForward(12003, 12000). +// EventSourcePodPortForward(13003, 13000). +// EventSourcePodPortForward(14003, 14000).TerminateAllPodPortForwards() + +// // Start one Sensor +// w2 := s.Given().Sensor("@testdata/sensor-multi-sensor.yaml"). +// When(). +// CreateSensor(). +// WaitForSensorReady() +// defer w2.DeleteSensor() + +// w2.Then(). +// ExpectSensorPodLogContains(LogSensorStarted, util.PodLogCheckOptionWithCount(1)) + +// time.Sleep(3 * time.Second) + +// // Trigger first dependency +// // test-dep-1 +// s.e("http://localhost:12003").POST("/example1").WithBytes([]byte("{}")). +// Expect(). +// Status(200) + +// w1.Then().ExpectEventSourcePodLogContains(LogPublishEventSuccessful, util.PodLogCheckOptionWithCount(1)) + +// // Start second Sensor +// w3 := s.Given().Sensor("@testdata/sensor-multi-sensor-2.yaml"). +// When(). +// CreateSensor(). +// WaitForSensorReady() +// defer w3.DeleteSensor() + +// w3.Then(). +// ExpectSensorPodLogContains(LogSensorStarted, util.PodLogCheckOptionWithCount(1)) + +// // Trigger second dependency +// // test-dep-2 +// s.e("http://localhost:13003").POST("/example2").WithBytes([]byte("{}")). +// Expect(). +// Status(200) +// w1.Then().ExpectEventSourcePodLogContains(LogPublishEventSuccessful, util.PodLogCheckOptionWithCount(2)) + +// // Verify trigger occurs for first Sensor and not second +// w2.Then().ExpectSensorPodLogContains(LogTriggerActionSuccessful("log-trigger-1")) +// w3.Then().ExpectSensorPodLogContains(LogTriggerActionSuccessful("log-trigger-1"), util.PodLogCheckOptionWithCount(0)) + +// } + +// func (s *FunctionalSuite) TestAtLeastOnce() { +// // Send an event to a sensor with a failing trigger and make sure it doesn't ACK it. +// // Delete the sensor and launch sensor with same name and non-failing trigger so it ACKS it. + +// if fixtures.GetBusDriverSpec() == fixtures.E2EEventBusSTAN { +// s.T().SkipNow() // Skipping because AtLeastOnce does not apply for NATS. +// } + +// w1 := s.Given().EventSource("@testdata/es-webhook.yaml"). +// When(). +// CreateEventSource(). +// WaitForEventSourceReady() + +// defer w1.DeleteEventSource() + +// w1.Then(). +// ExpectEventSourcePodLogContains(LogEventSourceStarted) + +// defer w1.Then().EventSourcePodPortForward(12006, 12000). +// TerminateAllPodPortForwards() + +// w2 := s.Given().Sensor("@testdata/sensor-atleastonce-fail.yaml"). +// When(). +// CreateSensor(). +// WaitForSensorReady() +// w2.Then(). +// ExpectSensorPodLogContains(LogSensorStarted, util.PodLogCheckOptionWithCount(1)) +// time.Sleep(3 * time.Second) +// s.e("http://localhost:12006").POST("/example").WithBytes([]byte("{}")). +// Expect(). +// Status(200) + +// w1.Then().ExpectEventSourcePodLogContains(LogPublishEventSuccessful, util.PodLogCheckOptionWithCount(1)) +// w2.Then().ExpectSensorPodLogContains("Making a http request...") +// time.Sleep(5 * time.Second) // make sure we definitely attempt to trigger + +// w2.DeleteSensor() +// time.Sleep(10 * time.Second) + +// w3 := s.Given().Sensor("@testdata/sensor-atleastonce-succeed.yaml"). +// When(). +// CreateSensor(). +// WaitForSensorReady() +// defer w3.DeleteSensor() + +// w3.Then(). +// ExpectSensorPodLogContains(LogSensorStarted, util.PodLogCheckOptionWithCount(1)) + +// w3.Then().
+// ExpectSensorPodLogContains(LogTriggerActionSuccessful("trigger-atleastonce")) +// } + +// func (s *FunctionalSuite) TestAtMostOnce() { +// // Send an event to a sensor with a failing trigger but it will ACK it. +// // Delete the sensor and launch sensor with same name and non-failing trigger +// // to see that the event doesn't come through. + +// w1 := s.Given().EventSource("@testdata/es-webhook.yaml"). +// When(). +// CreateEventSource(). +// WaitForEventSourceReady() +// defer w1.DeleteEventSource() +// w1.Then(). +// ExpectEventSourcePodLogContains(LogEventSourceStarted) + +// defer w1.Then().EventSourcePodPortForward(12007, 12000). +// TerminateAllPodPortForwards() + +// w2 := s.Given().Sensor("@testdata/sensor-atmostonce-fail.yaml"). +// When(). +// CreateSensor(). +// WaitForSensorReady() +// w2.Then(). +// ExpectSensorPodLogContains(LogSensorStarted, util.PodLogCheckOptionWithCount(1)) +// time.Sleep(3 * time.Second) +// s.e("http://localhost:12007").POST("/example").WithBytes([]byte("{}")). +// Expect(). +// Status(200) + +// w1.Then().ExpectEventSourcePodLogContains(LogPublishEventSuccessful, util.PodLogCheckOptionWithCount(1)) +// w2.Then().ExpectSensorPodLogContains("Making a http request...") +// time.Sleep(5 * time.Second) // make sure we definitely attempt to trigger + +// w2.DeleteSensor() +// time.Sleep(10 * time.Second) + +// w3 := s.Given().Sensor("@testdata/sensor-atmostonce-succeed.yaml"). +// When(). +// CreateSensor(). +// WaitForSensorReady() +// defer w3.DeleteSensor() + +// w3.Then(). +// ExpectSensorPodLogContains(LogSensorStarted, util.PodLogCheckOptionWithCount(1)) + +// w3.Then(). +// ExpectSensorPodLogContains(LogTriggerActionSuccessful("trigger-atmostonce"), util.PodLogCheckOptionWithCount(0)) +// } + +// func (s *FunctionalSuite) TestMultipleSensorAtLeastOnceTrigger() { +// // Start two sensors which each use "A && B", but staggered in time such that one receives the partial condition +// // Then send the other part of the condition and verify that only one triggers +// // With AtLeastOnce flag set. + +// w1 := s.Given().EventSource("@testdata/es-multi-sensor.yaml"). +// When(). +// CreateEventSource(). +// WaitForEventSourceReady() +// defer w1.DeleteEventSource() + +// w1.Then(). +// ExpectEventSourcePodLogContains(LogEventSourceStarted) + +// defer w1.Then().EventSourcePodPortForward(12004, 12000). +// EventSourcePodPortForward(13004, 13000). +// EventSourcePodPortForward(14004, 14000).TerminateAllPodPortForwards() + +// // Start one Sensor +// w2 := s.Given().Sensor("@testdata/sensor-multi-sensor-atleastonce.yaml"). +// When(). +// CreateSensor(). +// WaitForSensorReady() +// defer w2.DeleteSensor() + +// w2.Then(). +// ExpectSensorPodLogContains(LogSensorStarted, util.PodLogCheckOptionWithCount(1)) + +// time.Sleep(3 * time.Second) + +// // Trigger first dependency +// // test-dep-1 +// s.e("http://localhost:12004").POST("/example1").WithBytes([]byte("{}")). +// Expect(). +// Status(200) + +// w1.Then().ExpectEventSourcePodLogContains(LogPublishEventSuccessful, util.PodLogCheckOptionWithCount(1)) + +// // Start second Sensor +// w3 := s.Given().Sensor("@testdata/sensor-multi-sensor-2-atleastonce.yaml"). +// When(). +// CreateSensor(). +// WaitForSensorReady() +// defer w3.DeleteSensor() + +// w3.Then(). +// ExpectSensorPodLogContains(LogSensorStarted, util.PodLogCheckOptionWithCount(1)) + +// // Trigger second dependency +// // test-dep-2 +// s.e("http://localhost:13004").POST("/example2").WithBytes([]byte("{}")). +// Expect().
+// Status(200) +// w1.Then().ExpectEventSourcePodLogContains(LogPublishEventSuccessful, util.PodLogCheckOptionWithCount(2)) + +// // Verify trigger occurs for first Sensor and not second +// w2.Then().ExpectSensorPodLogContains(LogTriggerActionSuccessful("log-trigger-1-atleastonce")) +// w3.Then().ExpectSensorPodLogContains(LogTriggerActionSuccessful("log-trigger-1-atleastonce"), util.PodLogCheckOptionWithCount(0)) +// } + +// func (s *FunctionalSuite) TestTriggerSpecChange() { +// // Start a sensor which uses "A && B"; send A; replace the Sensor with a new spec which uses A; send C and verify that there's no trigger + +// // Start EventSource +// t1 := s.Given().EventSource("@testdata/es-trigger-spec-change.yaml"). +// When(). +// CreateEventSource(). +// WaitForEventSourceReady(). +// Then(). +// ExpectEventSourcePodLogContains(LogEventSourceStarted). +// EventSourcePodPortForward(12005, 12000). +// EventSourcePodPortForward(13005, 13000). +// EventSourcePodPortForward(14005, 14000) + +// defer t1.When().DeleteEventSource() +// defer t1.TerminateAllPodPortForwards() + +// // Start one Sensor + +// t2 := s.Given().Sensor("@testdata/sensor-trigger-spec-change.yaml"). +// When(). +// CreateSensor(). +// WaitForSensorReady(). +// Then(). +// ExpectSensorPodLogContains(LogSensorStarted, util.PodLogCheckOptionWithCount(1)) + +// time.Sleep(3 * time.Second) + +// // Trigger first dependency +// // test-dep-1 +// s.e("http://localhost:12005").POST("/example").WithBytes([]byte("{}")). +// Expect(). +// Status(200) + +// t1.ExpectEventSourcePodLogContains(LogPublishEventSuccessful, util.PodLogCheckOptionWithCount(1)) + +// t2.When().DeleteSensor().Then().ExpectNoSensorPodFound() + +// // Change Sensor's spec +// t2 = s.Given().Sensor("@testdata/sensor-trigger-spec-change-2.yaml"). +// When(). +// CreateSensor(). +// WaitForSensorReady(). +// Then(). +// ExpectSensorPodLogContains(LogSensorStarted, util.PodLogCheckOptionWithCount(1)) + +// defer t2.When().DeleteSensor() + +// time.Sleep(3 * time.Second) + +// // test-dep-3 +// s.e("http://localhost:14005").POST("/example").WithBytes([]byte("{}")). +// Expect(). 
+// Status(200) + +// t1.ExpectEventSourcePodLogContains(LogPublishEventSuccessful, util.PodLogCheckOptionWithCount(2)) +// // Verify no Trigger this time since test-dep-1 should have been cleared +// t2.ExpectSensorPodLogContains(LogTriggerActionSuccessful("log-trigger-1"), util.PodLogCheckOptionWithCount(0)) +// } func TestFunctionalSuite(t *testing.T) { suite.Run(t, new(FunctionalSuite)) diff --git a/test/e2e/testdata/es-calendar-ha-k8s.yaml b/test/e2e/testdata/es-calendar-ha-k8s.yaml new file mode 100644 index 0000000000..6e0e61f640 --- /dev/null +++ b/test/e2e/testdata/es-calendar-ha-k8s.yaml @@ -0,0 +1,13 @@ +apiVersion: argoproj.io/v1alpha1 +kind: EventSource +metadata: + annotations: + events.argoproj.io/leader-election: k8s + name: e2e-calendar-ha-k8s +spec: + replicas: 2 + template: + serviceAccountName: argo-events-sa + calendar: + example: + interval: 2s diff --git a/test/e2e/testdata/es-calendar-ha.yaml b/test/e2e/testdata/es-calendar-ha.yaml index 4098eb32ce..11b3e1e731 100644 --- a/test/e2e/testdata/es-calendar-ha.yaml +++ b/test/e2e/testdata/es-calendar-ha.yaml @@ -8,4 +8,4 @@ spec: serviceAccountName: argo-events-sa calendar: example: - interval: 2s + interval: 10s diff --git a/test/e2e/testdata/es-calendar-metrics.yaml b/test/e2e/testdata/es-calendar-metrics.yaml index de65f53fca..9e8ce5331e 100644 --- a/test/e2e/testdata/es-calendar-metrics.yaml +++ b/test/e2e/testdata/es-calendar-metrics.yaml @@ -3,6 +3,8 @@ kind: EventSource metadata: name: e2e-calendar-metrics spec: + template: + serviceAccountName: argo-events-sa calendar: example: - interval: 2s + interval: 10s diff --git a/test/e2e/testdata/es-calendar.yaml b/test/e2e/testdata/es-calendar.yaml index 17c9e4f364..f4c3365635 100644 --- a/test/e2e/testdata/es-calendar.yaml +++ b/test/e2e/testdata/es-calendar.yaml @@ -3,6 +3,8 @@ kind: EventSource metadata: name: e2e-calendar spec: + template: + serviceAccountName: argo-events-sa calendar: example: - interval: 2s + interval: 10s diff --git a/test/e2e/testdata/es-durable-consumer.yaml b/test/e2e/testdata/es-durable-consumer.yaml new file mode 100644 index 0000000000..01c0790514 --- /dev/null +++ b/test/e2e/testdata/es-durable-consumer.yaml @@ -0,0 +1,20 @@ +apiVersion: argoproj.io/v1alpha1 +kind: EventSource +metadata: + name: e2e-durable-consumer +spec: + template: + serviceAccountName: argo-events-sa + webhook: + example1: + port: "12000" + endpoint: /example1 + method: POST + example2: + port: "13000" + endpoint: /example2 + method: POST + example3: + port: "14000" + endpoint: /example3 + method: POST diff --git a/test/e2e/testdata/es-multi-dep.yaml b/test/e2e/testdata/es-multi-dep.yaml new file mode 100644 index 0000000000..b62e7208fc --- /dev/null +++ b/test/e2e/testdata/es-multi-dep.yaml @@ -0,0 +1,20 @@ +apiVersion: argoproj.io/v1alpha1 +kind: EventSource +metadata: + name: e2e-multi-dep +spec: + template: + serviceAccountName: argo-events-sa + webhook: + example1: + port: "12000" + endpoint: /example1 + method: POST + example2: + port: "13000" + endpoint: /example2 + method: POST + example3: + port: "14000" + endpoint: /example3 + method: POST diff --git a/test/e2e/testdata/es-multi-sensor.yaml b/test/e2e/testdata/es-multi-sensor.yaml new file mode 100644 index 0000000000..f6a26eda40 --- /dev/null +++ b/test/e2e/testdata/es-multi-sensor.yaml @@ -0,0 +1,20 @@ +apiVersion: argoproj.io/v1alpha1 +kind: EventSource +metadata: + name: e2e-multi-sensor +spec: + template: + serviceAccountName: argo-events-sa + webhook: + example1: + port: "12000" + endpoint: /example1 + 
method: POST + example2: + port: "13000" + endpoint: /example2 + method: POST + example3: + port: "14000" + endpoint: /example3 + method: POST diff --git a/test/e2e/testdata/es-test-metrics-webhook.yaml b/test/e2e/testdata/es-test-metrics-webhook.yaml index 5c4e970ab5..8dbdc4ee2f 100644 --- a/test/e2e/testdata/es-test-metrics-webhook.yaml +++ b/test/e2e/testdata/es-test-metrics-webhook.yaml @@ -3,6 +3,8 @@ kind: EventSource metadata: name: e2e-test-metrics-webhook spec: + template: + serviceAccountName: argo-events-sa webhook: example: port: "12000" diff --git a/test/e2e/testdata/es-trigger-spec-change.yaml b/test/e2e/testdata/es-trigger-spec-change.yaml new file mode 100644 index 0000000000..03ad5070d8 --- /dev/null +++ b/test/e2e/testdata/es-trigger-spec-change.yaml @@ -0,0 +1,20 @@ +apiVersion: argoproj.io/v1alpha1 +kind: EventSource +metadata: + name: e2e-trigger-spec-change +spec: + template: + serviceAccountName: argo-events-sa + webhook: + example1: + port: "12000" + endpoint: /example + method: POST + example2: + port: "13000" + endpoint: /example + method: POST + example3: + port: "14000" + endpoint: /example + method: POST \ No newline at end of file diff --git a/test/e2e/testdata/es-webhook.yaml b/test/e2e/testdata/es-webhook.yaml new file mode 100644 index 0000000000..d17b00971e --- /dev/null +++ b/test/e2e/testdata/es-webhook.yaml @@ -0,0 +1,12 @@ +apiVersion: argoproj.io/v1alpha1 +kind: EventSource +metadata: + name: e2e-webhook +spec: + template: + serviceAccountName: argo-events-sa + webhook: + example: + port: "12000" + endpoint: /example + method: POST diff --git a/test/e2e/testdata/sensor-atleastonce-fail.yaml b/test/e2e/testdata/sensor-atleastonce-fail.yaml new file mode 100644 index 0000000000..497ff830de --- /dev/null +++ b/test/e2e/testdata/sensor-atleastonce-fail.yaml @@ -0,0 +1,28 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: e2e-atleastonce-sensor +spec: + replicas: 1 + template: + serviceAccountName: argo-events-sa + container: + env: + - name: DEBUG_LOG + value: "true" + dependencies: + - name: test-dep-1 + eventSourceName: e2e-webhook + eventName: example + triggers: + - template: + name: trigger-atleastonce + http: + # Expect to fail + url: https://invalid.com/ + method: GET + atLeastOnce: true + retryStrategy: + steps: 100 + duration: 500s + factor: 5.0 diff --git a/test/e2e/testdata/sensor-atleastonce-succeed.yaml b/test/e2e/testdata/sensor-atleastonce-succeed.yaml new file mode 100644 index 0000000000..a20a498533 --- /dev/null +++ b/test/e2e/testdata/sensor-atleastonce-succeed.yaml @@ -0,0 +1,21 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: e2e-atleastonce-sensor +spec: + replicas: 1 + template: + serviceAccountName: argo-events-sa + container: + env: + - name: DEBUG_LOG + value: "true" + dependencies: + - name: test-dep-1 + eventSourceName: e2e-webhook + eventName: example + triggers: + - template: + name: trigger-atleastonce + log: {} + atLeastOnce: true diff --git a/test/e2e/testdata/sensor-atmostonce-fail.yaml b/test/e2e/testdata/sensor-atmostonce-fail.yaml new file mode 100644 index 0000000000..0aa8ebfea8 --- /dev/null +++ b/test/e2e/testdata/sensor-atmostonce-fail.yaml @@ -0,0 +1,27 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: e2e-atmostonce-sensor +spec: + replicas: 1 + template: + serviceAccountName: argo-events-sa + container: + env: + - name: DEBUG_LOG + value: "true" + dependencies: + - name: test-dep-1 + eventSourceName: e2e-webhook + eventName: example + triggers: + - 
template: + name: trigger-atmostonce + http: + # Expect to fail + url: https://invalid.com/ + method: GET + retryStrategy: + steps: 100 + duration: 500s + factor: 5.0 diff --git a/test/e2e/testdata/sensor-atmostonce-succeed.yaml b/test/e2e/testdata/sensor-atmostonce-succeed.yaml new file mode 100644 index 0000000000..93a4da0d95 --- /dev/null +++ b/test/e2e/testdata/sensor-atmostonce-succeed.yaml @@ -0,0 +1,20 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: e2e-atmostonce-sensor +spec: + replicas: 1 + template: + serviceAccountName: argo-events-sa + container: + env: + - name: DEBUG_LOG + value: "true" + dependencies: + - name: test-dep-1 + eventSourceName: e2e-webhook + eventName: example + triggers: + - template: + name: trigger-atmostonce + log: {} diff --git a/test/e2e/testdata/sensor-durable-consumer.yaml b/test/e2e/testdata/sensor-durable-consumer.yaml new file mode 100644 index 0000000000..d29c0063f1 --- /dev/null +++ b/test/e2e/testdata/sensor-durable-consumer.yaml @@ -0,0 +1,31 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: e2e-durable-consumer +spec: + replicas: 1 + template: + serviceAccountName: argo-events-sa + container: + env: + - name: DEBUG_LOG + value: "true" + dependencies: + - name: test-dep-1 + eventSourceName: e2e-durable-consumer + eventName: example1 + - name: test-dep-2 + eventSourceName: e2e-durable-consumer + eventName: example2 + - name: test-dep-3 + eventSourceName: e2e-durable-consumer + eventName: example3 + triggers: + - template: + conditions: "(test-dep-1 && test-dep-2) || test-dep-3" + name: log-trigger-1 + log: {} + - template: + conditions: "test-dep-1" + name: log-trigger-2 + log: {} diff --git a/test/e2e/testdata/sensor-log-ha-k8s.yaml b/test/e2e/testdata/sensor-log-ha-k8s.yaml new file mode 100644 index 0000000000..4896c753a9 --- /dev/null +++ b/test/e2e/testdata/sensor-log-ha-k8s.yaml @@ -0,0 +1,18 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + annotations: + events.argoproj.io/leader-election: k8s + name: e2e-log-ha-k8s +spec: + replicas: 2 + template: + serviceAccountName: argo-events-sa + dependencies: + - name: test-dep + eventSourceName: e2e-calendar-ha-k8s + eventName: example + triggers: + - template: + name: log-trigger + log: {} diff --git a/test/e2e/testdata/sensor-log-metrics.yaml b/test/e2e/testdata/sensor-log-metrics.yaml index ea8b0c946a..0b7d63b548 100644 --- a/test/e2e/testdata/sensor-log-metrics.yaml +++ b/test/e2e/testdata/sensor-log-metrics.yaml @@ -3,6 +3,8 @@ kind: Sensor metadata: name: e2e-log-metrics spec: + template: + serviceAccountName: argo-events-sa dependencies: - name: test-dep eventSourceName: e2e-calendar-metrics diff --git a/test/e2e/testdata/sensor-log.yaml b/test/e2e/testdata/sensor-log.yaml index 34ef0943b7..98f77b134e 100644 --- a/test/e2e/testdata/sensor-log.yaml +++ b/test/e2e/testdata/sensor-log.yaml @@ -3,6 +3,8 @@ kind: Sensor metadata: name: e2e-log spec: + template: + serviceAccountName: argo-events-sa dependencies: - name: test-dep eventSourceName: e2e-calendar diff --git a/test/e2e/testdata/sensor-multi-dep.yaml b/test/e2e/testdata/sensor-multi-dep.yaml new file mode 100644 index 0000000000..9f6fe398fe --- /dev/null +++ b/test/e2e/testdata/sensor-multi-dep.yaml @@ -0,0 +1,31 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: e2e-multi-dep +spec: + replicas: 1 + template: + serviceAccountName: argo-events-sa + container: + env: + - name: DEBUG_LOG + value: "true" + dependencies: + - name: test-dep-1 + eventSourceName: 
e2e-multi-dep + eventName: example1 + - name: test-dep-2 + eventSourceName: e2e-multi-dep + eventName: example2 + - name: test-dep-3 + eventSourceName: e2e-multi-dep + eventName: example3 + triggers: + - template: + conditions: "(test-dep-1 && test-dep-2) || test-dep-3" + name: log-trigger-1 + log: {} + - template: + conditions: "test-dep-1" + name: log-trigger-2 + log: {} diff --git a/test/e2e/testdata/sensor-multi-sensor-2-atleastonce.yaml b/test/e2e/testdata/sensor-multi-sensor-2-atleastonce.yaml new file mode 100644 index 0000000000..a9bea2b912 --- /dev/null +++ b/test/e2e/testdata/sensor-multi-sensor-2-atleastonce.yaml @@ -0,0 +1,28 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: e2e-multi-sensor-2-atleastonce +spec: + replicas: 1 + template: + serviceAccountName: argo-events-sa + container: + env: + - name: DEBUG_LOG + value: "true" + dependencies: + - name: test-dep-1-atleastonce + eventSourceName: e2e-multi-sensor + eventName: example1 + - name: test-dep-2-atleastonce + eventSourceName: e2e-multi-sensor + eventName: example2 + - name: test-dep-3-atleastonce + eventSourceName: e2e-multi-sensor + eventName: example3 + triggers: + - template: + conditions: "(test-dep-1-atleastonce && test-dep-2-atleastonce) || test-dep-3-atleastonce" + name: log-trigger-1-atleastonce + atLeastOnce: true + log: {} diff --git a/test/e2e/testdata/sensor-multi-sensor-2.yaml b/test/e2e/testdata/sensor-multi-sensor-2.yaml new file mode 100644 index 0000000000..7528012b2c --- /dev/null +++ b/test/e2e/testdata/sensor-multi-sensor-2.yaml @@ -0,0 +1,27 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: e2e-multi-sensor-2 +spec: + replicas: 1 + template: + serviceAccountName: argo-events-sa + container: + env: + - name: DEBUG_LOG + value: "true" + dependencies: + - name: test-dep-1 + eventSourceName: e2e-multi-sensor + eventName: example1 + - name: test-dep-2 + eventSourceName: e2e-multi-sensor + eventName: example2 + - name: test-dep-3 + eventSourceName: e2e-multi-sensor + eventName: example3 + triggers: + - template: + conditions: "(test-dep-1 && test-dep-2) || test-dep-3" + name: log-trigger-1 + log: {} diff --git a/test/e2e/testdata/sensor-multi-sensor-atleastonce.yaml b/test/e2e/testdata/sensor-multi-sensor-atleastonce.yaml new file mode 100644 index 0000000000..ec80de564e --- /dev/null +++ b/test/e2e/testdata/sensor-multi-sensor-atleastonce.yaml @@ -0,0 +1,28 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: e2e-multi-sensor-atleastonce +spec: + replicas: 1 + template: + serviceAccountName: argo-events-sa + container: + env: + - name: DEBUG_LOG + value: "true" + dependencies: + - name: test-dep-1-atleastonce + eventSourceName: e2e-multi-sensor + eventName: example1 + - name: test-dep-2-atleastonce + eventSourceName: e2e-multi-sensor + eventName: example2 + - name: test-dep-3-atleastonce + eventSourceName: e2e-multi-sensor + eventName: example3 + triggers: + - template: + conditions: "(test-dep-1-atleastonce && test-dep-2-atleastonce) || test-dep-3-atleastonce" + name: log-trigger-1-atleastonce + atLeastOnce: true + log: {} diff --git a/test/e2e/testdata/sensor-multi-sensor.yaml b/test/e2e/testdata/sensor-multi-sensor.yaml new file mode 100644 index 0000000000..0fa7ca2de0 --- /dev/null +++ b/test/e2e/testdata/sensor-multi-sensor.yaml @@ -0,0 +1,27 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: e2e-multi-sensor +spec: + replicas: 1 + template: + serviceAccountName: argo-events-sa + container: + env: + - name: DEBUG_LOG + 
value: "true" + dependencies: + - name: test-dep-1 + eventSourceName: e2e-multi-sensor + eventName: example1 + - name: test-dep-2 + eventSourceName: e2e-multi-sensor + eventName: example2 + - name: test-dep-3 + eventSourceName: e2e-multi-sensor + eventName: example3 + triggers: + - template: + conditions: "(test-dep-1 && test-dep-2) || test-dep-3" + name: log-trigger-1 + log: {} diff --git a/test/e2e/testdata/sensor-resource.yaml b/test/e2e/testdata/sensor-resource.yaml index 94dc13a465..e11f0245cd 100644 --- a/test/e2e/testdata/sensor-resource.yaml +++ b/test/e2e/testdata/sensor-resource.yaml @@ -3,6 +3,8 @@ kind: Sensor metadata: name: e2e-resource-log spec: + template: + serviceAccountName: argo-events-sa dependencies: - name: test-dep eventSourceName: test-resource diff --git a/test/e2e/testdata/sensor-test-metrics.yaml b/test/e2e/testdata/sensor-test-metrics.yaml index a0c6b68950..913c06a0e8 100644 --- a/test/e2e/testdata/sensor-test-metrics.yaml +++ b/test/e2e/testdata/sensor-test-metrics.yaml @@ -3,6 +3,8 @@ kind: Sensor metadata: name: e2e-test-metrics spec: + template: + serviceAccountName: argo-events-sa dependencies: - name: test-dep eventSourceName: e2e-test-metrics-webhook @@ -15,5 +17,5 @@ spec: name: http-trigger http: # Expect to fail - url: https://not-existing.com/ + url: https://invalid.com/ method: GET \ No newline at end of file diff --git a/test/e2e/testdata/sensor-trigger-spec-change-2.yaml b/test/e2e/testdata/sensor-trigger-spec-change-2.yaml new file mode 100644 index 0000000000..736e4cf6be --- /dev/null +++ b/test/e2e/testdata/sensor-trigger-spec-change-2.yaml @@ -0,0 +1,27 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: e2e-trigger-spec-change +spec: + replicas: 1 + template: + serviceAccountName: argo-events-sa + container: + env: + - name: DEBUG_LOG + value: "true" + dependencies: + - name: test-dep-1 + eventSourceName: e2e-multi-event-webhook + eventName: example1 + - name: test-dep-2 + eventSourceName: e2e-multi-event-webhook + eventName: example2 + - name: test-dep-3 + eventSourceName: e2e-multi-event-webhook + eventName: example3 + triggers: + - template: + conditions: "test-dep-1 && test-dep-2" + name: log-trigger-1 + log: {} diff --git a/test/e2e/testdata/sensor-trigger-spec-change.yaml b/test/e2e/testdata/sensor-trigger-spec-change.yaml new file mode 100644 index 0000000000..c52c0a33d4 --- /dev/null +++ b/test/e2e/testdata/sensor-trigger-spec-change.yaml @@ -0,0 +1,27 @@ +apiVersion: argoproj.io/v1alpha1 +kind: Sensor +metadata: + name: e2e-trigger-spec-change +spec: + replicas: 1 + template: + serviceAccountName: argo-events-sa + container: + env: + - name: DEBUG_LOG + value: "true" + dependencies: + - name: test-dep-1 + eventSourceName: e2e-multi-event-webhook + eventName: example1 + - name: test-dep-2 + eventSourceName: e2e-multi-event-webhook + eventName: example2 + - name: test-dep-3 + eventSourceName: e2e-multi-event-webhook + eventName: example3 + triggers: + - template: + conditions: "(test-dep-1 && test-dep-2) || test-dep-3" + name: log-trigger-1 + log: {} diff --git a/test/manifests/kafka/kafka.yaml b/test/manifests/kafka/kafka.yaml new file mode 100644 index 0000000000..4c38956775 --- /dev/null +++ b/test/manifests/kafka/kafka.yaml @@ -0,0 +1,54 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + app: kafka + name: kafka +spec: + serviceName: kafka + replicas: 1 + selector: + matchLabels: + app: kafka + template: + metadata: + labels: + app: kafka + spec: + containers: + - env: + - name: 
KAFKA_ADVERTISED_PORT + value: "9092" + - name: KAFKA_ADVERTISED_HOST_NAME + value: "kafka" + - name: KAFKA_BROKER_ID + value: "0" + - name: KAFKA_PORT + value: "9092" + - name: KAFKA_ZOOKEEPER_CONNECT + value: "zookeeper:2181" + - name: KAFKA_LISTENERS + value: "INSIDE://:9092" + - name: KAFKA_ADVERTISED_LISTENERS + value: "INSIDE://:9092" + - name: KAFKA_INTER_BROKER_LISTENER_NAME + value: "INSIDE" + - name: KAFKA_LISTENER_SECURITY_PROTOCOL_MAP + value: "INSIDE:PLAINTEXT,OUTSIDE:PLAINTEXT" + image: wurstmeister/kafka + name: kafka + ports: + - containerPort: 9092 +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: kafka + name: kafka +spec: + type: NodePort + selector: + app: kafka + ports: + - port: 9092 diff --git a/manifests/base/sensor-controller/kustomization.yaml b/test/manifests/kafka/kustomization.yaml similarity index 66% rename from manifests/base/sensor-controller/kustomization.yaml rename to test/manifests/kafka/kustomization.yaml index 6b120ddd82..c386e1eea6 100644 --- a/manifests/base/sensor-controller/kustomization.yaml +++ b/test/manifests/kafka/kustomization.yaml @@ -2,4 +2,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - - sensor-controller-deployment.yaml +- kafka.yaml +- zookeeper.yaml diff --git a/test/manifests/kafka/zookeeper.yaml b/test/manifests/kafka/zookeeper.yaml new file mode 100644 index 0000000000..be0b5dfbb9 --- /dev/null +++ b/test/manifests/kafka/zookeeper.yaml @@ -0,0 +1,38 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: zookeeper + name: zookeeper +spec: + replicas: 1 + selector: + matchLabels: + app: zookeeper + template: + metadata: + labels: + app: zookeeper + spec: + containers: + - image: wurstmeister/zookeeper + imagePullPolicy: IfNotPresent + name: zookeeper + ports: + - containerPort: 2181 +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: zookeeper + name: zookeeper +spec: + type: NodePort + selector: + app: zookeeper + ports: + - name: zookeeper-port + port: 2181 + nodePort: 30181 + targetPort: 2181 diff --git a/test/manifests/kustomization.yaml b/test/manifests/kustomization.yaml index 5dd2e8efe3..32027ed93b 100644 --- a/test/manifests/kustomization.yaml +++ b/test/manifests/kustomization.yaml @@ -2,10 +2,45 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization resources: - - ../../manifests/cluster-install - - ../../manifests/extensions/validating-webhook +- ../../manifests/cluster-install +- ../../manifests/extensions/validating-webhook +patches: +- patch: |- + - op: replace + path: /spec/template/spec/containers/0/imagePullPolicy + value: IfNotPresent + target: + kind: Deployment + name: controller-manager +- patch: |- + - op: replace + path: /spec/template/spec/containers/0/imagePullPolicy + value: IfNotPresent + target: + kind: Deployment + name: events-webhook +- patch: |- + - op: add + path: /spec/template/spec/containers/0/env/- + value: + name: IMAGE_PULL_POLICY + value: IfNotPresent + target: + kind: Deployment + name: controller-manager +- patch: |- + - op: add + path: /spec/template/spec/containers/0/env/- + value: + name: SHOULD_REPORT_TO_CF + value: "false" + target: + kind: Deployment + name: controller-manager namespace: argo-events -commonLabels: - "app.kubernetes.io/part-of": "argo-events" +labels: +- includeSelectors: true + pairs: + app.kubernetes.io/part-of: argo-events diff --git a/test/stress/README.md b/test/stress/README.md index 55352d615a..eed1d332c0 100644 --- a/test/stress/README.md +++ b/test/stress/README.md @@ -11,8 
+11,8 @@ Argo Events provides a set of simple tools to do stress testing: - Set up Prometheus metrics monitoring. Follow the [instruction](https://argoproj.github.io/argo-events/metrics/) to - set up Prometheus to grab metrics, also make sure the basic Kubenetes metrics - like Pod CPU/memory ussage are captured. Display the metrics using tools like + set up Prometheus to grab metrics, also make sure the basic Kubernetes metrics + like Pod CPU/memory usage are captured. Display the metrics using tools like [Grafana](https://grafana.com/). - Create the EventSource and Sensor for testing. @@ -20,7 +20,7 @@ Argo Events provides a set of simple tools to do stress testing: You can use the tool below to create the EventSource and Sensor, or use your existing ones for testing. If you want to run the testing against a webhook typed EventSource (e.g. `webhook`, `sns`, etc), you need to set up the ingress - for it beforehands. + for it beforehand. ```shell # Make sure you have sourced a KUBECONFIG file @@ -46,7 +46,7 @@ For example, command below creates a `webhook` EventSource and a Sensor with It will exit in 5 minutes and clean up the created resources. ```shell -go run ./test/stress/main.go --es-type webhook --trigger-type log --hard-timeout 5m +go run ./test/stress/main.go --eb-type jetstream --es-type webhook --trigger-type log --hard-timeout 5m ``` The spec of `webhook` EventSource is located in @@ -82,5 +82,5 @@ It could also hook into your existing EventSources and Sensors to give a simple report. ```shell -go run ./test/stress/main.go --es-name my-sqs-es --sensor-name my-sensor --hard-timeout 5m +go run ./test/stress/main.go --eb-type jetstream --es-name my-sqs-es --sensor-name my-sensor --hard-timeout 5m ``` diff --git a/test/stress/main.go b/test/stress/main.go index 23c6a5e792..4a98910a13 100644 --- a/test/stress/main.go +++ b/test/stress/main.go @@ -5,7 +5,6 @@ import ( "context" "encoding/json" "fmt" - "io/ioutil" "os" "path/filepath" "regexp" @@ -59,10 +58,10 @@ const ( logEventSourceStarted = "Eventing server started." logSensorStarted = "Sensor started." 
- logTriggerActionSuccessful = "successfully processed the trigger" - logTriggerActionFailed = "failed to execute a trigger" - logEventSuccessful = "succeeded to publish an event" - logEventFailed = "failed to publish an event" + logTriggerActionSuccessful = "Successfully processed trigger" + logTriggerActionFailed = "Failed to execute a trigger" + logEventSuccessful = "Succeeded to publish an event" + logEventFailed = "Failed to publish an event" ) type TestingEventSource string @@ -78,6 +77,15 @@ const ( RedisEventSource TestingEventSource = "redis" ) +type EventBusType string + +// possible value of EventBus type +const ( + UnsupportedEventBusType EventBusType = "unsupported" + STANEventBus EventBusType = "stan" + JetstreamEventBus EventBusType = "jetstream" +) + type TestingTrigger string // possible values of TestingTrigger @@ -95,6 +103,7 @@ type options struct { namespace string testingEventSource TestingEventSource testingTrigger TestingTrigger + eventBusType EventBusType esName string sensorName string // Inactive time before exiting @@ -109,7 +118,7 @@ type options struct { restConfig *rest.Config } -func NewOptions(testingEventSource TestingEventSource, testingTrigger TestingTrigger, esName, sensorName string, idleTimeout time.Duration, noCleanUp bool) (*options, error) { +func NewOptions(testingEventSource TestingEventSource, testingTrigger TestingTrigger, eventBusType EventBusType, esName, sensorName string, idleTimeout time.Duration, noCleanUp bool) (*options, error) { loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() configOverrides := &clientcmd.ConfigOverrides{} kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides) @@ -129,6 +138,7 @@ func NewOptions(testingEventSource TestingEventSource, testingTrigger TestingTri namespace: namespace, testingEventSource: testingEventSource, testingTrigger: testingTrigger, + eventBusType: eventBusType, esName: esName, sensorName: sensorName, kubeClient: kubeClient, @@ -142,10 +152,10 @@ func NewOptions(testingEventSource TestingEventSource, testingTrigger TestingTri } func (o *options) createEventBus(ctx context.Context) (*eventbusv1alpha1.EventBus, error) { - fmt.Println("------- Creating EventBus -------") + fmt.Printf("------- Creating %v EventBus -------\n", o.eventBusType) eb := &eventbusv1alpha1.EventBus{} - if err := readResource("@testdata/eventbus/default.yaml", eb); err != nil { - return nil, fmt.Errorf("failed to read event bus yaml file: %w", err) + if err := readResource(fmt.Sprintf("@testdata/eventbus/%v.yaml", o.eventBusType), eb); err != nil { + return nil, fmt.Errorf("failed to read %v event bus yaml file: %w", o.eventBusType, err) } l := eb.GetLabels() if l == nil { @@ -191,7 +201,7 @@ func (o *options) createEventSource(ctx context.Context) (*eventsourcev1alpha1.E if err := testutil.WaitForEventSourceDeploymentReady(ctx, o.kubeClient, o.namespace, es.Name, defaultTimeout); err != nil { return nil, fmt.Errorf("expected to see event source deployment and pod ready: %w", err) } - contains, err := testutil.EventSourcePodLogContains(ctx, o.kubeClient, o.namespace, es.Name, logEventSourceStarted, defaultTimeout) + contains, err := testutil.EventSourcePodLogContains(ctx, o.kubeClient, o.namespace, es.Name, logEventSourceStarted) if err != nil { return nil, fmt.Errorf("expected to see event source pod contains something: %w", err) } @@ -225,7 +235,7 @@ func (o *options) createSensor(ctx context.Context) (*sensorv1alpha1.Sensor, err if err := 
testutil.WaitForSensorDeploymentReady(ctx, o.kubeClient, o.namespace, sensor.Name, defaultTimeout); err != nil { return nil, fmt.Errorf("expected to see sensor deployment and pod ready: %w", err) } - contains, err := testutil.SensorPodLogContains(ctx, o.kubeClient, o.namespace, sensor.Name, logSensorStarted, defaultTimeout) + contains, err := testutil.SensorPodLogContains(ctx, o.kubeClient, o.namespace, sensor.Name, logSensorStarted) if err != nil { return nil, fmt.Errorf("expected to see sensor pod contains something: %w", err) } @@ -277,23 +287,10 @@ func (o *options) runTesting(ctx context.Context, eventSourceName, sensorName st return fmt.Errorf("no pod found for sensor %s", sensorName) } - successActionReg, err := regexp.Compile(logTriggerActionSuccessful) - if err != nil { - return fmt.Errorf("failed to compile regex for sensor success pattern: %v", err) - } - failureActionReg, err := regexp.Compile(logTriggerActionFailed) - if err != nil { - return fmt.Errorf("failed to compile regex for sensor failure pattern: %v", err) - } - - successEventReg, err := regexp.Compile(logEventSuccessful) - if err != nil { - return fmt.Errorf("failed to compile regex for event source success pattern: %v", err) - } - failureEventReg, err := regexp.Compile(logEventFailed) - if err != nil { - return fmt.Errorf("failed to compile regex for event source failure pattern: %v", err) - } + successActionReg := regexp.MustCompile(logTriggerActionSuccessful) + failureActionReg := regexp.MustCompile(logTriggerActionFailed) + successEventReg := regexp.MustCompile(logEventSuccessful) + failureEventReg := regexp.MustCompile(logEventFailed) fmt.Printf(` ********************************************************* @@ -315,8 +312,8 @@ Or you can terminate it any time by Ctrl + C. sensorMap := map[string]int64{} sensorTimeMap := map[string]time.Time{} - var esLock = &sync.Mutex{} - var sensorLock = &sync.Mutex{} + var esLock = &sync.RWMutex{} + var sensorLock = &sync.RWMutex{} startTime := time.Now() @@ -362,6 +359,7 @@ Or you can terminate it any time by Ctrl + C. } timeout := 5 * 60 * time.Second lastActionTime := startTime + sensorLock.RLock() if len(sensorMap) > 0 && len(sensorTimeMap) > 0 { timeout = o.idleTimeout for _, v := range sensorTimeMap { @@ -370,6 +368,7 @@ Or you can terminate it any time by Ctrl + C. } } } + sensorLock.RUnlock() if time.Since(lastActionTime).Seconds() > timeout.Seconds() { fmt.Printf("Exited Sensor Pod %s due to no actions in the last %v\n", podName, o.idleTimeout) @@ -446,6 +445,8 @@ Or you can terminate it any time by Ctrl + C. } timeout := 5 * 60 * time.Second lastEventTime := startTime + + esLock.RLock() if len(esMap) > 0 && len(esTimeMap) > 0 { timeout = o.idleTimeout for _, v := range esTimeMap { @@ -454,6 +455,7 @@ Or you can terminate it any time by Ctrl + C. 
} } } + esLock.RUnlock() if time.Since(lastEventTime).Seconds() > timeout.Seconds() { fmt.Printf("Exited EventSource Pod %s due to no active events in the last %v\n", podName, o.idleTimeout) return @@ -671,7 +673,7 @@ func readResource(text string, v metav1.Object) error { if strings.HasPrefix(text, "@") { file := strings.TrimPrefix(text, "@") _, fileName, _, _ := runtime.Caller(0) - data, err = ioutil.ReadFile(filepath.Dir(fileName) + "/" + file) + data, err = os.ReadFile(filepath.Dir(fileName) + "/" + file) if err != nil { return fmt.Errorf("failed to read a file: %w", err) } @@ -703,6 +705,17 @@ func getTestingEventSource(str string) TestingEventSource { } } +func getEventBusType(str string) EventBusType { + switch str { + case "jetstream": + return JetstreamEventBus + case "stan": + return STANEventBus + default: + return UnsupportedEventBusType + } +} + func getTestingTrigger(str string) TestingTrigger { switch str { case "log": @@ -716,6 +729,7 @@ func getTestingTrigger(str string) TestingTrigger { func main() { var ( + ebTypeStr string esTypeStr string triggerTypeStr string esName string @@ -748,6 +762,12 @@ func main() { cmd.HelpFunc()(cmd, args) os.Exit(1) } + eventBusType := getEventBusType(ebTypeStr) + if eventBusType == UnsupportedEventBusType { + fmt.Printf("Invalid event bus type %s\n\n", ebTypeStr) + cmd.HelpFunc()(cmd, args) + os.Exit(1) + } idleTimeout, err := time.ParseDuration(idleTimeoutStr) if err != nil { @@ -755,7 +775,7 @@ func main() { cmd.HelpFunc()(cmd, args) os.Exit(1) } - opts, err := NewOptions(esType, triggerType, esName, sensorName, idleTimeout, noCleanUp) + opts, err := NewOptions(esType, triggerType, eventBusType, esName, sensorName, idleTimeout, noCleanUp) if err != nil { fmt.Printf("Failed: %v\n", err) os.Exit(1) @@ -775,6 +795,7 @@ func main() { } }, } + rootCmd.Flags().StringVarP(&ebTypeStr, "eb-type", "b", "", "Type of event bus to be tested: stan, jetstream") rootCmd.Flags().StringVarP(&esTypeStr, "es-type", "e", "", "Type of event source to be tested, e.g. webhook, sqs, etc.") rootCmd.Flags().StringVarP(&triggerTypeStr, "trigger-type", "t", string(LogTrigger), "Type of trigger to be tested, e.g. 
log, workflow.") rootCmd.Flags().StringVar(&esName, "es-name", "", "Name of an existing event source to be tested") diff --git a/test/stress/testdata/eventbus/jetstream.yaml b/test/stress/testdata/eventbus/jetstream.yaml new file mode 100644 index 0000000000..6d88d4f100 --- /dev/null +++ b/test/stress/testdata/eventbus/jetstream.yaml @@ -0,0 +1,7 @@ +apiVersion: argoproj.io/v1alpha1 +kind: EventBus +metadata: + name: default +spec: + jetstream: + version: latest diff --git a/test/stress/testdata/eventbus/default.yaml b/test/stress/testdata/eventbus/stan.yaml similarity index 100% rename from test/stress/testdata/eventbus/default.yaml rename to test/stress/testdata/eventbus/stan.yaml diff --git a/test/util/util.go b/test/util/util.go index 32e3dc0a07..1175e4486c 100644 --- a/test/util/util.go +++ b/test/util/util.go @@ -5,16 +5,13 @@ import ( "context" "fmt" "regexp" + "sync" "time" - appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" - eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1" - eventsourcev1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventsource/v1alpha1" - sensorv1alpha1 "github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1" eventbuspkg "github.com/argoproj/argo-events/pkg/client/eventbus/clientset/versioned/typed/eventbus/v1alpha1" eventsourcepkg "github.com/argoproj/argo-events/pkg/client/eventsource/clientset/versioned/typed/eventsource/v1alpha1" sensorpkg "github.com/argoproj/argo-events/pkg/client/sensor/clientset/versioned/typed/sensor/v1alpha1" @@ -23,127 +20,66 @@ import ( func WaitForEventBusReady(ctx context.Context, eventBusClient eventbuspkg.EventBusInterface, eventBusName string, timeout time.Duration) error { fieldSelector := "metadata.name=" + eventBusName opts := metav1.ListOptions{FieldSelector: fieldSelector} - watch, err := eventBusClient.Watch(ctx, opts) - if err != nil { - return err - } - defer watch.Stop() - timeoutCh := make(chan bool, 1) - go func() { - time.Sleep(timeout) - timeoutCh <- true - }() + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() for { select { - case event := <-watch.ResultChan(): - eb, ok := event.Object.(*eventbusv1alpha1.EventBus) - if ok { - if eb.Status.IsReady() { - return nil - } - } else { - return fmt.Errorf("not eventbus") - } - case <-timeoutCh: + case <-ctx.Done(): return fmt.Errorf("timeout after %v waiting for EventBus ready", timeout) + default: } + ebList, err := eventBusClient.List(ctx, opts) + if err != nil { + return fmt.Errorf("error getting EventBus list: %w", err) + } + if len(ebList.Items) > 0 && ebList.Items[0].Status.IsReady() { + return nil + } + time.Sleep(1 * time.Second) } } func WaitForEventBusStatefulSetReady(ctx context.Context, kubeClient kubernetes.Interface, namespace, eventBusName string, timeout time.Duration) error { labelSelector := fmt.Sprintf("controller=eventbus-controller,eventbus-name=%s", eventBusName) opts := metav1.ListOptions{LabelSelector: labelSelector} - watch, err := kubeClient.AppsV1().StatefulSets(namespace).Watch(ctx, opts) - if err != nil { - return err - } - defer watch.Stop() - timeoutCh := make(chan bool, 1) - go func() { - time.Sleep(timeout) - timeoutCh <- true - }() - -statefulSetWatch: + ctx, cancel := context.WithTimeout(ctx, timeout) + defer cancel() for { select { - case event := <-watch.ResultChan(): - ss, ok := event.Object.(*appsv1.StatefulSet) - if ok { - if ss.Status.Replicas == ss.Status.ReadyReplicas { - break statefulSetWatch - } 
 
 func WaitForEventBusStatefulSetReady(ctx context.Context, kubeClient kubernetes.Interface, namespace, eventBusName string, timeout time.Duration) error {
 	labelSelector := fmt.Sprintf("controller=eventbus-controller,eventbus-name=%s", eventBusName)
 	opts := metav1.ListOptions{LabelSelector: labelSelector}
-	watch, err := kubeClient.AppsV1().StatefulSets(namespace).Watch(ctx, opts)
-	if err != nil {
-		return err
-	}
-	defer watch.Stop()
-	timeoutCh := make(chan bool, 1)
-	go func() {
-		time.Sleep(timeout)
-		timeoutCh <- true
-	}()
-
-statefulSetWatch:
+	ctx, cancel := context.WithTimeout(ctx, timeout)
+	defer cancel()
 	for {
 		select {
-		case event := <-watch.ResultChan():
-			ss, ok := event.Object.(*appsv1.StatefulSet)
-			if ok {
-				if ss.Status.Replicas == ss.Status.ReadyReplicas {
-					break statefulSetWatch
-				}
-			} else {
-				return fmt.Errorf("not statefulset")
-			}
-		case <-timeoutCh:
+		case <-ctx.Done():
 			return fmt.Errorf("timeout after %v waiting for EventBus StatefulSet ready", timeout)
+		default:
 		}
-	}
-
-	// POD
-	podWatch, err := kubeClient.CoreV1().Pods(namespace).Watch(ctx, opts)
-	if err != nil {
-		return err
-	}
-	defer podWatch.Stop()
-	podTimeoutCh := make(chan bool, 1)
-	go func() {
-		time.Sleep(timeout)
-		podTimeoutCh <- true
-	}()
-
-	podNames := make(map[string]bool)
-	for {
-		if len(podNames) == 3 {
-			// defaults to 3 Pods
-			return nil
+		stsList, err := kubeClient.AppsV1().StatefulSets(namespace).List(ctx, opts)
+		if err != nil {
+			return fmt.Errorf("error getting EventBus StatefulSet list: %w", err)
 		}
-		select {
-		case event := <-podWatch.ResultChan():
-			p, ok := event.Object.(*corev1.Pod)
-			if ok {
-				if p.Status.Phase == corev1.PodRunning {
-					if _, existing := podNames[p.GetName()]; !existing {
-						podNames[p.GetName()] = true
-					}
-				}
-			} else {
-				return fmt.Errorf("not pod")
-			}
-		case <-podTimeoutCh:
-			return fmt.Errorf("timeout after %v waiting for event bus Pod ready", timeout)
+		if len(stsList.Items) > 0 && stsList.Items[0].Status.Replicas == stsList.Items[0].Status.ReadyReplicas {
+			return nil
 		}
+		time.Sleep(1 * time.Second)
 	}
 }
 
 func WaitForEventSourceReady(ctx context.Context, eventSourceClient eventsourcepkg.EventSourceInterface, eventSourceName string, timeout time.Duration) error {
 	fieldSelector := "metadata.name=" + eventSourceName
 	opts := metav1.ListOptions{FieldSelector: fieldSelector}
-	watch, err := eventSourceClient.Watch(ctx, opts)
-	if err != nil {
-		return err
-	}
-	defer watch.Stop()
-	timeoutCh := make(chan bool, 1)
-	go func() {
-		time.Sleep(timeout)
-		timeoutCh <- true
-	}()
+	ctx, cancel := context.WithTimeout(ctx, timeout)
+	defer cancel()
 	for {
 		select {
-		case event := <-watch.ResultChan():
-			es, ok := event.Object.(*eventsourcev1alpha1.EventSource)
-			if ok {
-				if es.Status.IsReady() {
-					return nil
-				}
-			} else {
-				return fmt.Errorf("not eventsource")
-			}
-		case <-timeoutCh:
+		case <-ctx.Done():
 			return fmt.Errorf("timeout after %v waiting for EventSource ready", timeout)
+		default:
 		}
+		esList, err := eventSourceClient.List(ctx, opts)
+		if err != nil {
+			return fmt.Errorf("error getting EventSource list: %w", err)
+		}
+		if len(esList.Items) > 0 && esList.Items[0].Status.IsReady() {
+			return nil
+		}
+		time.Sleep(1 * time.Second)
 	}
 }
 
@@ -155,30 +91,22 @@ func WaitForEventSourceDeploymentReady(ctx context.Context, kubeClient kubernete
 func WaitForSensorReady(ctx context.Context, sensorClient sensorpkg.SensorInterface, sensorName string, timeout time.Duration) error {
 	fieldSelector := "metadata.name=" + sensorName
 	opts := metav1.ListOptions{FieldSelector: fieldSelector}
-	watch, err := sensorClient.Watch(ctx, opts)
-	if err != nil {
-		return err
-	}
-	defer watch.Stop()
-	timeoutCh := make(chan bool, 1)
-	go func() {
-		time.Sleep(timeout)
-		timeoutCh <- true
-	}()
+	ctx, cancel := context.WithTimeout(ctx, timeout)
+	defer cancel()
 	for {
 		select {
-		case event := <-watch.ResultChan():
-			s, ok := event.Object.(*sensorv1alpha1.Sensor)
-			if ok {
-				if s.Status.IsReady() {
-					return nil
-				}
-			} else {
-				return fmt.Errorf("not sensor")
-			}
-		case <-timeoutCh:
+		case <-ctx.Done():
 			return fmt.Errorf("timeout after %v waiting for Sensor ready", timeout)
+		default:
+		}
+		sensorList, err := sensorClient.List(ctx, opts)
+		if err != nil {
+			return fmt.Errorf("error getting Sensor list: %w", err)
 		}
+		if len(sensorList.Items) > 0 && sensorList.Items[0].Status.IsReady() {
+			return nil
+		}
+		time.Sleep(1 * time.Second)
 	}
 }
 
@@ -189,91 +117,110 @@ func WaitForSensorDeploymentReady(ctx context.Context, kubeClient kubernetes.Int
 func waitForDeploymentAndPodReady(ctx context.Context, kubeClient kubernetes.Interface, namespace, objectType, labelSelector string, timeout time.Duration) error {
 	opts := metav1.ListOptions{LabelSelector: labelSelector}
-	deployWatch, err := kubeClient.AppsV1().Deployments(namespace).Watch(ctx, opts)
-	if err != nil {
-		return err
-	}
-	defer deployWatch.Stop()
-	deployTimeoutCh := make(chan bool, 1)
-	go func() {
-		time.Sleep(timeout)
-		deployTimeoutCh <- true
-	}()
-
-deployWatch:
+	ctx, cancel := context.WithTimeout(ctx, timeout)
+	defer cancel()
 	for {
 		select {
-		case event := <-deployWatch.ResultChan():
-			ss, ok := event.Object.(*appsv1.Deployment)
-			if ok {
-				if ss.Status.Replicas == ss.Status.AvailableReplicas {
-					break deployWatch
-				}
-			} else {
-				return fmt.Errorf("not deployment")
-			}
-		case <-deployTimeoutCh:
-			return fmt.Errorf("timeout after %v waiting for %s Deployment ready", timeout, objectType)
+		case <-ctx.Done():
+			return fmt.Errorf("timeout after %v waiting for %s Deployment ready", timeout, objectType)
+		default:
 		}
+		deployList, err := kubeClient.AppsV1().Deployments(namespace).List(ctx, opts)
+		if err != nil {
+			return fmt.Errorf("error getting deployment list: %w", err)
+		}
+		ok := len(deployList.Items) == 1
+		if !ok {
+			time.Sleep(1 * time.Second)
+			continue
+		}
+		ok = ok && deployList.Items[0].Status.Replicas == deployList.Items[0].Status.ReadyReplicas
+		podList, err := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector, FieldSelector: "status.phase=Running"})
+		if err != nil {
+			return fmt.Errorf("error getting deployment pod list: %w", err)
+		}
+		ok = ok && len(podList.Items) > 0 && len(podList.Items) == int(*deployList.Items[0].Spec.Replicas)
+		for _, p := range podList.Items {
+			ok = ok && p.Status.Phase == corev1.PodRunning
+		}
+		if ok {
+			return nil
+		}
+		time.Sleep(1 * time.Second)
 	}
+}
 
-	// POD
-	podWatch, err := kubeClient.CoreV1().Pods(namespace).Watch(ctx, opts)
-	if err != nil {
-		return err
+type podLogCheckOptions struct {
+	timeout time.Duration
+	count   int
+}
+
+func defaultPodLogCheckOptions() *podLogCheckOptions {
+	return &podLogCheckOptions{
+		timeout: 15 * time.Second,
+		count:   -1,
 	}
-	defer podWatch.Stop()
-	podTimeoutCh := make(chan bool, 1)
-	go func() {
-		time.Sleep(timeout)
-		podTimeoutCh <- true
-	}()
-	for {
-		select {
-		case event := <-podWatch.ResultChan():
-			p, ok := event.Object.(*corev1.Pod)
-			if ok {
-				if p.Status.Phase == corev1.PodRunning {
-					return nil
-				}
-			} else {
-				return fmt.Errorf("not Pod")
-			}
-		case <-podTimeoutCh:
-			return fmt.Errorf("timeout after %v waiting for %s Pod ready", timeout, objectType)
-		}
-	}
+}
+
+type PodLogCheckOption func(*podLogCheckOptions)
+
+func PodLogCheckOptionWithTimeout(t time.Duration) PodLogCheckOption {
+	return func(o *podLogCheckOptions) {
+		o.timeout = t
+	}
+}
+
+func PodLogCheckOptionWithCount(c int) PodLogCheckOption {
+	return func(o *podLogCheckOptions) {
+		o.count = c
+	}
 }
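With `PodsLogContains` and its wrappers now taking functional options, call sites state only what they override; per the defaults above, that is a 15-second timeout and `count == -1`, meaning "at least one match". A hypothetical pair of call sites (the import path and the log patterns are assumptions for illustration):

```go
package e2e

import (
	"context"
	"time"

	"k8s.io/client-go/kubernetes"

	// Assumed import path; the helpers live in test/util/util.go and are
	// referenced as testutil elsewhere in the tests.
	testutil "github.com/argoproj/argo-events/test/util"
)

func waitForLogs(ctx context.Context, kubeClient kubernetes.Interface, namespace, sensorName, esName string) (bool, error) {
	// Wait up to two minutes for at least one match (count stays at the
	// default -1, i.e. "any occurrence").
	ok, err := testutil.SensorPodLogContains(ctx, kubeClient, namespace, sensorName,
		"deployment successful", testutil.PodLogCheckOptionWithTimeout(2*time.Minute))
	if err != nil || !ok {
		return ok, err
	}
	// Require exactly three matches within the default 15-second window.
	return testutil.EventSourcePodLogContains(ctx, kubeClient, namespace, esName,
		"succeeded to publish an event", testutil.PodLogCheckOptionWithCount(3))
}
```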
 
-func EventSourcePodLogContains(ctx context.Context, kubeClient kubernetes.Interface, namespace, eventSourceName, regex string, timeout time.Duration) (bool, error) {
+func EventSourcePodLogContains(ctx context.Context, kubeClient kubernetes.Interface, namespace, eventSourceName, regex string, options ...PodLogCheckOption) (bool, error) {
 	labelSelector := fmt.Sprintf("controller=eventsource-controller,eventsource-name=%s", eventSourceName)
 	podList, err := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector, FieldSelector: "status.phase=Running"})
 	if err != nil {
 		return false, fmt.Errorf("error getting event source pod name: %w", err)
 	}
-	return PodsLogContains(ctx, kubeClient, namespace, regex, podList, timeout), nil
+	return PodsLogContains(ctx, kubeClient, namespace, regex, podList, options...), nil
 }
 
-func SensorPodLogContains(ctx context.Context, kubeClient kubernetes.Interface, namespace, sensorName, regex string, timeout time.Duration) (bool, error) {
+func SensorPodLogContains(ctx context.Context, kubeClient kubernetes.Interface, namespace, sensorName, regex string, options ...PodLogCheckOption) (bool, error) {
 	labelSelector := fmt.Sprintf("controller=sensor-controller,sensor-name=%s", sensorName)
 	podList, err := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector, FieldSelector: "status.phase=Running"})
 	if err != nil {
 		return false, fmt.Errorf("error getting sensor pod name: %w", err)
 	}
-	return PodsLogContains(ctx, kubeClient, namespace, regex, podList, timeout), nil
+	return PodsLogContains(ctx, kubeClient, namespace, regex, podList, options...), nil
 }
 
-func PodsLogContains(ctx context.Context, kubeClient kubernetes.Interface, namespace, regex string, podList *corev1.PodList, timeout time.Duration) bool {
-	cctx, cancel := context.WithTimeout(ctx, timeout)
+func PodsLogContains(ctx context.Context, kubeClient kubernetes.Interface, namespace, regex string, podList *corev1.PodList, options ...PodLogCheckOption) bool {
+	// parse options
+	o := defaultPodLogCheckOptions()
+	for _, opt := range options {
+		if opt != nil {
+			opt(o)
+		}
+	}
+
+	cctx, cancel := context.WithTimeout(ctx, o.timeout)
 	defer cancel()
 	errChan := make(chan error)
 	resultChan := make(chan bool)
+	wg := &sync.WaitGroup{}
 	for _, p := range podList.Items {
+		wg.Add(1)
 		go func(podName string) {
+			defer wg.Done()
 			fmt.Printf("Watching POD: %s\n", podName)
-			contains, err := podLogContains(cctx, kubeClient, namespace, podName, regex)
+			var contains bool
+			var err error
+			if o.count == -1 {
+				contains, err = podLogContains(cctx, kubeClient, namespace, podName, regex)
+			} else {
+				contains, err = podLogContainsCount(cctx, kubeClient, namespace, podName, regex, o.count)
+			}
 			if err != nil {
 				errChan <- err
 				return
@@ -283,21 +230,33 @@ func PodsLogContains(ctx context.Context, kubeClient kubernetes.Interface, names
 			}
 		}(p.Name)
 	}
-
+	allDone := make(chan bool)
+	go func() {
+		wg.Wait()
+		close(allDone)
+	}()
 	for {
 		select {
-		case <-cctx.Done():
-			return false
 		case result := <-resultChan:
 			if result {
 				return true
+			} else {
+				fmt.Println("got a non-matching result, keep waiting")
 			}
 		case err := <-errChan:
 			fmt.Printf("error: %v", err)
+		case <-allDone:
+			for len(resultChan) > 0 {
+				if x := <-resultChan; x {
+					return true
+				}
+			}
+			return false
 		}
 	}
 }
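The fan-out in `PodsLogContains` needs to distinguish "some pod matched" from "every pod finished without matching", which is why it pairs a `sync.WaitGroup` with an `allDone` channel. A stripped-down sketch of that fan-in shape, using a buffered results channel so workers never block on send:

```go
package main

import (
	"fmt"
	"sync"
)

// anyTrue runs check concurrently for every input and reports whether any
// invocation returned true, returning as soon as the first true arrives.
func anyTrue(inputs []string, check func(string) bool) bool {
	// Buffered so workers never block on send; allDone therefore only
	// closes after every result has been deposited.
	results := make(chan bool, len(inputs))
	var wg sync.WaitGroup
	for _, in := range inputs {
		wg.Add(1)
		go func(s string) {
			defer wg.Done()
			results <- check(s)
		}(in)
	}
	allDone := make(chan struct{})
	go func() {
		wg.Wait()
		close(allDone)
	}()
	for {
		select {
		case r := <-results:
			if r {
				return true
			}
		case <-allDone:
			// Drain results deposited between the last read and wg.Wait().
			for len(results) > 0 {
				if <-results {
					return true
				}
			}
			return false
		}
	}
}

func main() {
	fmt.Println(anyTrue([]string{"a", "bb", "ccc"}, func(s string) bool { return len(s) == 2 }))
}
```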
 
+// look for at least one instance of the regex string in the log
 func podLogContains(ctx context.Context, client kubernetes.Interface, namespace, podName, regex string) (bool, error) {
 	stream, err := client.CoreV1().Pods(namespace).GetLogs(podName, &corev1.PodLogOptions{Follow: true}).Stream(ctx)
 	if err != nil {
@@ -327,3 +286,70 @@ func podLogContains(ctx context.Context, client kubernetes.Interface, namespace,
 		}
 	}
 }
+
+// look for a specific number of instances of the regex string in the log
+func podLogContainsCount(ctx context.Context, client kubernetes.Interface, namespace, podName, regex string, count int) (bool, error) {
+	stream, err := client.CoreV1().Pods(namespace).GetLogs(podName, &corev1.PodLogOptions{Follow: true}).Stream(ctx)
+	if err != nil {
+		return false, err
+	}
+	defer func() { _ = stream.Close() }()
+
+	exp, err := regexp.Compile(regex)
+	if err != nil {
+		return false, err
+	}
+
+	instancesChan := make(chan struct{})
+
+	// scan the log looking for matches
+	go func(ctx context.Context, instancesChan chan<- struct{}) {
+		s := bufio.NewScanner(stream)
+		for {
+			select {
+			case <-ctx.Done():
+				return
+			default:
+				if !s.Scan() {
+					return
+				}
+				data := s.Bytes()
+				fmt.Println(string(data))
+				if exp.Match(data) {
+					instancesChan <- struct{}{}
+				}
+			}
+		}
+	}(ctx, instancesChan)
+
+	actualCount := 0
+	for {
+		select {
+		case <-instancesChan:
+			actualCount++
+		case <-ctx.Done():
+			fmt.Printf("time: %v, expected count: %d, actual count: %d\n", time.Now().Unix(), count, actualCount)
+			return count == actualCount, nil
+		}
+	}
+}
+
+func WaitForNoPodFound(ctx context.Context, kubeClient kubernetes.Interface, namespace, labelSelector string, timeout time.Duration) error {
+	ctx, cancel := context.WithTimeout(ctx, timeout)
+	defer cancel()
+	for {
+		podList, err := kubeClient.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
+		if err != nil {
+			return fmt.Errorf("error getting pod list: %w", err)
+		}
+		if len(podList.Items) == 0 {
+			return nil
+		}
+		select {
+		case <-ctx.Done():
+			return fmt.Errorf("timeout waiting for pods to disappear")
+		default:
+		}
+		time.Sleep(2 * time.Second)
+	}
+}
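`podLogContainsCount` follows the pod log, tallies regex matches until the context deadline, and then compares the tally with the expected count. The core of that scanner loop, reduced to any `io.Reader` (a sketch, not the helper itself):

```go
package main

import (
	"bufio"
	"context"
	"fmt"
	"io"
	"regexp"
	"strings"
	"time"
)

// countMatches scans r line by line until EOF or context expiry and returns
// how many lines match the pattern.
func countMatches(ctx context.Context, r io.Reader, pattern string) (int, error) {
	exp, err := regexp.Compile(pattern)
	if err != nil {
		return 0, err
	}
	count := 0
	s := bufio.NewScanner(r)
	for s.Scan() {
		select {
		case <-ctx.Done():
			return count, ctx.Err()
		default:
		}
		if exp.Match(s.Bytes()) {
			count++
		}
	}
	return count, s.Err()
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	n, _ := countMatches(ctx, strings.NewReader("ok\nfail\nok\n"), "^ok$")
	fmt.Println(n) // 2
}
```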
diff --git a/third_party/nats-streaming-docker/README.md b/third_party/nats-streaming-docker/README.md
index 0ffb0944d9..ac1b3a2302 100644
--- a/third_party/nats-streaming-docker/README.md
+++ b/third_party/nats-streaming-docker/README.md
@@ -5,4 +5,4 @@
 This is a partial copy of current
 [nats-streaming-server](https://github.com/nats-io/nats-streaming-server)
 version is
-[v0.17.0](https://github.com/nats-io/nats-streaming-server/tree/v0.17.0).
+[v0.25.2](https://github.com/nats-io/nats-streaming-server/releases/tag/v0.25.2).
diff --git a/third_party/nats-streaming-docker/amd64/nats-streaming-server b/third_party/nats-streaming-docker/amd64/nats-streaming-server
index dd0ef073ff..43ddc7aed2 100755
Binary files a/third_party/nats-streaming-docker/amd64/nats-streaming-server and b/third_party/nats-streaming-docker/amd64/nats-streaming-server differ
diff --git a/third_party/nats-streaming-docker/update.sh b/third_party/nats-streaming-docker/update.sh
index ddb5e714a8..8ec1eccbbb 100755
--- a/third_party/nats-streaming-docker/update.sh
+++ b/third_party/nats-streaming-docker/update.sh
@@ -9,33 +9,12 @@ fi
 
 VERSION=$1
 
-# cd to the current directory so the script can be run from anywhere.
 cd `dirname $0`
 
-echo "Fetching and building nats-streaming-server $VERSION..."
+echo "Downloading nats-streaming-server $VERSION..."
 
-# Create a tmp build directory.
-TEMP=/tmp/nats-streaming.build
-mkdir $TEMP
-
-git clone -b $VERSION https://github.com/nats-io/nats-streaming-server $TEMP
-
-docker build -t nats-streaming-builder $TEMP
-
-# Create a dummy nats streaming builder container so we can run a cp against it.
-ID=$(docker create nats-streaming-builder)
-
-# Update the local binaries.
-docker cp $ID:/go/src/github.com/nats-io/nats-streaming-server/pkg/linux-amd64/nats-streaming-server amd64/
-#docker cp $ID:/go/src/github.com/nats-io/nats-streaming-server/pkg/linux-arm6/nats-streaming-server arm32v6/
-#docker cp $ID:/go/src/github.com/nats-io/nats-streaming-server/pkg/linux-arm7/nats-streaming-server arm32v7/
-#docker cp $ID:/go/src/github.com/nats-io/nats-streaming-server/pkg/linux-arm64/nats-streaming-server arm64v8/
-#docker cp $ID:/go/src/github.com/nats-io/nats-streaming-server/pkg/win-amd64/nats-streaming-server.exe windows/nanoserver-1809/
-#docker cp $ID:/go/src/github.com/nats-io/nats-streaming-server/pkg/win-amd64/nats-streaming-server.exe windows/windowsservercore/
-
-# Cleanup.
-rm -fr $TEMP
-docker rm -f $ID
-docker rmi nats-streaming-builder
+wget -O nats-streaming-server.tar.gz "https://github.com/nats-io/nats-streaming-server/releases/download/${VERSION}/nats-streaming-server-${VERSION}-linux-amd64.tar.gz"
+tar -xf nats-streaming-server.tar.gz
+rm nats-streaming-server.tar.gz
 
 echo "Done."
diff --git a/third_party/prometheus-nats-exporter-docker/README.md b/third_party/prometheus-nats-exporter-docker/README.md
index 571e116738..75e42fea0c 100644
--- a/third_party/prometheus-nats-exporter-docker/README.md
+++ b/third_party/prometheus-nats-exporter-docker/README.md
@@ -3,4 +3,4 @@
 This is a partial copy of
 [prometheus-nats-exporter-docker](https://github.com/nats-io/prometheus-nats-exporter/tree/master/docker),
 current version is
-[v0.6.2](https://github.com/nats-io/prometheus-nats-exporter/releases/tag/v0.6.2).
+[v0.8.0](https://github.com/nats-io/prometheus-nats-exporter/releases/tag/v0.8.0).
diff --git a/third_party/prometheus-nats-exporter-docker/amd64/Dockerfile b/third_party/prometheus-nats-exporter-docker/amd64/Dockerfile
index f38c4394f0..89e7ebb2ce 100644
--- a/third_party/prometheus-nats-exporter-docker/amd64/Dockerfile
+++ b/third_party/prometheus-nats-exporter-docker/amd64/Dockerfile
@@ -1,11 +1,7 @@
 # Golang binary building stage
-FROM golang:1.14
-
-# download the source
-WORKDIR /go/src/github.com/nats-io/prometheus-nats-exporter
-RUN git clone --branch v0.6.2 https://github.com/nats-io/prometheus-nats-exporter.git .
-
-# build
+FROM golang:1.20.2
+WORKDIR $GOPATH/src/github.com/nats-io/prometheus-nats-exporter
+RUN git clone --branch v0.10.1 https://github.com/nats-io/prometheus-nats-exporter.git .
 RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -v -a -tags netgo -installsuffix netgo -ldflags "-s -w"
 
 # Final docker image building stage
diff --git a/version.go b/version.go
index 27c3481686..09955a41f1 100644
--- a/version.go
+++ b/version.go
@@ -58,7 +58,7 @@ func GetVersion() Version {
 	} else {
 		// otherwise formulate a version string based on as much metadata
 		// information we have available.
- versionStr = "v" + version + versionStr = version if len(gitCommit) >= 7 { versionStr += "+" + gitCommit[0:7] if gitTreeState != "clean" { diff --git a/webhook/cmd/start.go b/webhook/cmd/start.go index 5e59437efb..018b88e28a 100644 --- a/webhook/cmd/start.go +++ b/webhook/cmd/start.go @@ -3,6 +3,7 @@ package cmd import ( "crypto/tls" "os" + "strconv" "go.uber.org/zap" "k8s.io/apimachinery/pkg/runtime" @@ -19,10 +20,15 @@ import ( eventsourceclient "github.com/argoproj/argo-events/pkg/client/eventsource/clientset/versioned" sensorclient "github.com/argoproj/argo-events/pkg/client/sensor/clientset/versioned" "github.com/argoproj/argo-events/webhook" + envpkg "github.com/argoproj/pkg/env" ) const ( - namespaceEnvVar = "NAMESPACE" + serviceNameEnvVar = "SERVICE_NAME" + deploymentNameEnvVar = "DEPLOYMENT_NAME" + clusterRoleNameEnvVar = "CLUSTER_ROLE_NAME" + namespaceEnvVar = "NAMESPACE" + portEnvVar = "PORT" ) func Start() { @@ -39,17 +45,24 @@ func Start() { namespace, defined := os.LookupEnv(namespaceEnvVar) if !defined { - logger.Fatalf("required environment variable '%s' not defined", namespaceEnvVar) + logger.Fatalf("required environment variable %q not defined", namespaceEnvVar) + } + + portStr := envpkg.LookupEnvStringOr(portEnvVar, "443") + port, err := strconv.Atoi(portStr) + if err != nil { + logger.Fatalf("port should be a number, environment variable %q not valid", portStr) } options := webhook.Options{ - ServiceName: "events-webhook", - DeploymentName: "events-webhook", - Namespace: namespace, - Port: 443, - SecretName: "events-webhook-certs", - WebhookName: "webhook.argo-events.argoproj.io", - ClientAuth: tls.VerifyClientCertIfGiven, + ServiceName: envpkg.LookupEnvStringOr(serviceNameEnvVar, "events-webhook"), + DeploymentName: envpkg.LookupEnvStringOr(deploymentNameEnvVar, "events-webhook"), + ClusterRoleName: envpkg.LookupEnvStringOr(clusterRoleNameEnvVar, "argo-events-webhook"), + Namespace: namespace, + Port: port, + SecretName: "events-webhook-certs", + WebhookName: "webhook.argo-events.argoproj.io", + ClientAuth: tls.VerifyClientCertIfGiven, } controller := webhook.AdmissionController{ Client: kubeClient, diff --git a/webhook/validator/eventbus.go b/webhook/validator/eventbus.go index 1de89222cc..ee5fce4821 100644 --- a/webhook/validator/eventbus.go +++ b/webhook/validator/eventbus.go @@ -33,6 +33,7 @@ func (eb *eventbus) ValidateCreate(ctx context.Context) *admissionv1.AdmissionRe if err := eventbuscontroller.ValidateEventBus(eb.neweb); err != nil { return DeniedResponse(err.Error()) } + return AllowedResponse() } @@ -43,28 +44,47 @@ func (eb *eventbus) ValidateUpdate(ctx context.Context) *admissionv1.AdmissionRe if err := eventbuscontroller.ValidateEventBus(eb.neweb); err != nil { return DeniedResponse(err.Error()) } - if eb.neweb.Spec.NATS != nil { + switch { + case eb.neweb.Spec.NATS != nil: if eb.oldeb.Spec.NATS == nil { - return DeniedResponse("Can not change event bus implmementation") + return DeniedResponse("Can not change event bus implementation") } oldNats := eb.oldeb.Spec.NATS newNats := eb.neweb.Spec.NATS if newNats.Native != nil { if oldNats.Native == nil { - return DeniedResponse("Can not change NATS event bus implmementation from exotic to native") + return DeniedResponse("Can not change NATS event bus implementation from exotic to native") } if authChanged(oldNats.Native.Auth, newNats.Native.Auth) { return DeniedResponse("\"spec.nats.native.auth\" is immutable, can not be updated") } } else if newNats.Exotic != nil { if oldNats.Exotic == nil { - return 
DeniedResponse("Can not change NATS event bus implmementation from native to exotic") + return DeniedResponse("Can not change NATS event bus implementation from native to exotic") } if authChanged(oldNats.Exotic.Auth, newNats.Exotic.Auth) { return DeniedResponse("\"spec.nats.exotic.auth\" is immutable, can not be updated") } } + case eb.neweb.Spec.JetStream != nil: + if eb.oldeb.Spec.JetStream == nil { + return DeniedResponse("Can not change event bus implementation") + } + oldJs := eb.oldeb.Spec.JetStream + newJs := eb.neweb.Spec.JetStream + if (oldJs.StreamConfig == nil && newJs.StreamConfig != nil) || + (oldJs.StreamConfig != nil && newJs.StreamConfig == nil) { + return DeniedResponse("\"spec.jetstream.streamConfig\" is immutable, can not be updated") + } + if oldJs.StreamConfig != nil && newJs.StreamConfig != nil && *oldJs.StreamConfig != *newJs.StreamConfig { + return DeniedResponse("\"spec.jetstream.streamConfig\" is immutable, can not be updated, old value='%s', new value='%s'", *oldJs.StreamConfig, *newJs.StreamConfig) + } + case eb.neweb.Spec.JetStreamExotic != nil: + if eb.oldeb.Spec.JetStreamExotic == nil { + return DeniedResponse("Can not change event bus implementation") + } } + return AllowedResponse() } diff --git a/webhook/validator/eventbus_test.go b/webhook/validator/eventbus_test.go index 5934dc5e71..91203aa25d 100644 --- a/webhook/validator/eventbus_test.go +++ b/webhook/validator/eventbus_test.go @@ -50,4 +50,16 @@ func TestValidateEventBusUpdate(t *testing.T) { r := v.ValidateUpdate(contextWithLogger(t)) assert.False(t, r.Allowed) }) + + t.Run("test update native nats to exotic js", func(t *testing.T) { + newEb := eb.DeepCopy() + newEb.Generation++ + newEb.Spec.NATS = nil + newEb.Spec.JetStreamExotic = &eventbusv1alpha1.JetStreamConfig{ + URL: "nats://nats:4222", + } + v := NewEventBusValidator(fakeK8sClient, fakeEventBusClient, fakeEventSourceClient, fakeSensorClient, eb, newEb) + r := v.ValidateUpdate(contextWithLogger(t)) + assert.False(t, r.Allowed) + }) } diff --git a/webhook/validator/eventsource_test.go b/webhook/validator/eventsource_test.go index 2178c66a58..d60fa1d0aa 100644 --- a/webhook/validator/eventsource_test.go +++ b/webhook/validator/eventsource_test.go @@ -2,7 +2,7 @@ package validator import ( "fmt" - "io/ioutil" + "os" "testing" "github.com/ghodss/yaml" @@ -13,10 +13,13 @@ import ( func TestValidateEventSource(t *testing.T) { dir := "../../examples/event-sources" - files, err := ioutil.ReadDir(dir) + dirEntries, err := os.ReadDir(dir) assert.Nil(t, err) - for _, file := range files { - content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", dir, file.Name())) + for _, entry := range dirEntries { + if entry.IsDir() { + continue + } + content, err := os.ReadFile(fmt.Sprintf("%s/%s", dir, entry.Name())) assert.Nil(t, err) var es *v1alpha1.EventSource err = yaml.Unmarshal(content, &es) diff --git a/webhook/validator/sensor.go b/webhook/validator/sensor.go index 628671b84b..2c8722c529 100644 --- a/webhook/validator/sensor.go +++ b/webhook/validator/sensor.go @@ -2,8 +2,11 @@ package validator import ( "context" + "fmt" + "github.com/argoproj/argo-events/common" admissionv1 "k8s.io/api/admission/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" sensorcontroller "github.com/argoproj/argo-events/controllers/sensor" @@ -30,7 +33,16 @@ func NewSensorValidator(client kubernetes.Interface, ebClient eventbusclient.Int } func (s *sensor) ValidateCreate(ctx context.Context) *admissionv1.AdmissionResponse { - if err := 
diff --git a/webhook/validator/eventbus_test.go b/webhook/validator/eventbus_test.go
index 5934dc5e71..91203aa25d 100644
--- a/webhook/validator/eventbus_test.go
+++ b/webhook/validator/eventbus_test.go
@@ -50,4 +50,16 @@ func TestValidateEventBusUpdate(t *testing.T) {
 		r := v.ValidateUpdate(contextWithLogger(t))
 		assert.False(t, r.Allowed)
 	})
+
+	t.Run("test update native nats to exotic js", func(t *testing.T) {
+		newEb := eb.DeepCopy()
+		newEb.Generation++
+		newEb.Spec.NATS = nil
+		newEb.Spec.JetStreamExotic = &eventbusv1alpha1.JetStreamConfig{
+			URL: "nats://nats:4222",
+		}
+		v := NewEventBusValidator(fakeK8sClient, fakeEventBusClient, fakeEventSourceClient, fakeSensorClient, eb, newEb)
+		r := v.ValidateUpdate(contextWithLogger(t))
+		assert.False(t, r.Allowed)
+	})
 }
diff --git a/webhook/validator/eventsource_test.go b/webhook/validator/eventsource_test.go
index 2178c66a58..d60fa1d0aa 100644
--- a/webhook/validator/eventsource_test.go
+++ b/webhook/validator/eventsource_test.go
@@ -2,7 +2,7 @@ package validator
 
 import (
 	"fmt"
-	"io/ioutil"
+	"os"
 	"testing"
 
 	"github.com/ghodss/yaml"
@@ -13,10 +13,13 @@ import (
 
 func TestValidateEventSource(t *testing.T) {
 	dir := "../../examples/event-sources"
-	files, err := ioutil.ReadDir(dir)
+	dirEntries, err := os.ReadDir(dir)
 	assert.Nil(t, err)
-	for _, file := range files {
-		content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", dir, file.Name()))
+	for _, entry := range dirEntries {
+		if entry.IsDir() {
+			continue
+		}
+		content, err := os.ReadFile(fmt.Sprintf("%s/%s", dir, entry.Name()))
 		assert.Nil(t, err)
 		var es *v1alpha1.EventSource
 		err = yaml.Unmarshal(content, &es)
diff --git a/webhook/validator/sensor.go b/webhook/validator/sensor.go
index 628671b84b..2c8722c529 100644
--- a/webhook/validator/sensor.go
+++ b/webhook/validator/sensor.go
@@ -2,8 +2,11 @@ package validator
 
 import (
 	"context"
+	"fmt"
 
+	"github.com/argoproj/argo-events/common"
 	admissionv1 "k8s.io/api/admission/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
 
 	sensorcontroller "github.com/argoproj/argo-events/controllers/sensor"
@@ -30,7 +33,16 @@ func NewSensorValidator(client kubernetes.Interface, ebClient eventbusclient.Int
 }
 
 func (s *sensor) ValidateCreate(ctx context.Context) *admissionv1.AdmissionResponse {
-	if err := sensorcontroller.ValidateSensor(s.newSensor); err != nil {
+	eventBusName := common.DefaultEventBusName
+	if len(s.newSensor.Spec.EventBusName) > 0 {
+		eventBusName = s.newSensor.Spec.EventBusName
+	}
+	eventBus, err := s.eventBusClient.ArgoprojV1alpha1().EventBus(s.newSensor.Namespace).Get(ctx, eventBusName, metav1.GetOptions{})
+	if err != nil {
+		return DeniedResponse(fmt.Sprintf("failed to get EventBus eventBusName=%s; err=%v", eventBusName, err))
+	}
+
+	if err := sensorcontroller.ValidateSensor(s.newSensor, eventBus); err != nil {
 		return DeniedResponse(err.Error())
 	}
 	return AllowedResponse()
diff --git a/webhook/validator/sensor_test.go b/webhook/validator/sensor_test.go
index d31068a4cb..d75f2a954a 100644
--- a/webhook/validator/sensor_test.go
+++ b/webhook/validator/sensor_test.go
@@ -1,22 +1,71 @@
 package validator
 
 import (
+	"context"
 	"fmt"
-	"io/ioutil"
+	"os"
 	"testing"
 
 	"github.com/ghodss/yaml"
 	"github.com/stretchr/testify/assert"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
+	"github.com/argoproj/argo-events/common"
+	eventbusv1alpha1 "github.com/argoproj/argo-events/pkg/apis/eventbus/v1alpha1"
 	"github.com/argoproj/argo-events/pkg/apis/sensor/v1alpha1"
 )
 
+var (
+	fakeBus = &eventbusv1alpha1.EventBus{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: eventbusv1alpha1.SchemeGroupVersion.String(),
+			Kind:       "EventBus",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: testNamespace,
+			Name:      common.DefaultEventBusName,
+		},
+		Spec: eventbusv1alpha1.EventBusSpec{
+			NATS: &eventbusv1alpha1.NATSBus{
+				Native: &eventbusv1alpha1.NativeStrategy{
+					Auth: &eventbusv1alpha1.AuthStrategyToken,
+				},
+			},
+		},
+		Status: eventbusv1alpha1.EventBusStatus{
+			Config: eventbusv1alpha1.BusConfig{
+				NATS: &eventbusv1alpha1.NATSConfig{
+					URL:  "nats://xxxx",
+					Auth: &eventbusv1alpha1.AuthStrategyToken,
+					AccessSecret: &corev1.SecretKeySelector{
+						Key: "test-key",
+						LocalObjectReference: corev1.LocalObjectReference{
+							Name: "test-name",
+						},
+					},
+				},
+			},
+		},
+	}
+)
+
 func TestValidateSensor(t *testing.T) {
 	dir := "../../examples/sensors"
-	files, err := ioutil.ReadDir(dir)
+	dirEntries, err := os.ReadDir(dir)
 	assert.Nil(t, err)
-	for _, file := range files {
-		content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s", dir, file.Name()))
+
+	testBus := fakeBus.DeepCopy()
+	testBus.Status.MarkDeployed("test", "test")
+	testBus.Status.MarkConfigured()
+	_, err = fakeEventBusClient.ArgoprojV1alpha1().EventBus(testNamespace).Create(context.TODO(), testBus, metav1.CreateOptions{})
+	assert.Nil(t, err)
+
+	for _, entry := range dirEntries {
+		if entry.IsDir() {
+			continue
+		}
+		content, err := os.ReadFile(fmt.Sprintf("%s/%s", dir, entry.Name()))
 		assert.Nil(t, err)
 		var sensor *v1alpha1.Sensor
 		err = yaml.Unmarshal(content, &sensor)
diff --git a/webhook/validator/validator.go b/webhook/validator/validator.go
index f9363b27af..003bccc1d5 100644
--- a/webhook/validator/validator.go
+++ b/webhook/validator/validator.go
@@ -5,7 +5,6 @@ import (
 	"encoding/json"
 	"fmt"
 
-	"github.com/pkg/errors"
 	admissionv1 "k8s.io/api/admission/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -90,7 +89,7 @@ func GetValidator(ctx context.Context, client kubernetes.Interface, ebClient eve
 	}
 		return NewSensorValidator(client, ebClient, esClient, sensorClient, old, new), nil
 	default:
-		return nil, errors.Errorf("unrecognized GVK %v", kind)
+		return nil, fmt.Errorf("unrecognized GVK %v", kind)
 	}
 }
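Sensor creation is now validated against the EventBus it will attach to: the validator resolves the bus name (falling back to `common.DefaultEventBusName`), fetches it, and denies the request if the bus is missing. A sketch of just the name-fallback step (the constant's value of "default" is an assumption here):

```go
package main

import "fmt"

// defaultEventBusName mirrors common.DefaultEventBusName.
const defaultEventBusName = "default"

// resolveEventBusName sketches the fallback the validator applies before
// fetching the EventBus object.
func resolveEventBusName(specName string) string {
	if len(specName) > 0 {
		return specName
	}
	return defaultEventBusName
}

func main() {
	fmt.Println(resolveEventBusName(""))       // default
	fmt.Println(resolveEventBusName("jetbus")) // jetbus
}
```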
diff --git a/webhook/validator/validator_test.go b/webhook/validator/validator_test.go
index af05910775..b5191816a1 100644
--- a/webhook/validator/validator_test.go
+++ b/webhook/validator/validator_test.go
@@ -123,11 +123,6 @@ func fakeSensor() *sensorv1alpha1.Sensor {
 					Template: &sensorv1alpha1.TriggerTemplate{
 						Name: "fake-trigger",
 						K8s: &sensorv1alpha1.StandardK8STrigger{
-							GroupVersionResource: metav1.GroupVersionResource{
-								Group:    "k8s.io",
-								Version:  "",
-								Resource: "pods",
-							},
 							Operation: "create",
 							Source:    &sensorv1alpha1.ArtifactLocation{},
 						},
diff --git a/webhook/webhook.go b/webhook/webhook.go
index b74bdba3f3..950979ec18 100644
--- a/webhook/webhook.go
+++ b/webhook/webhook.go
@@ -13,12 +13,12 @@ import (
 	"time"
 
 	"github.com/go-openapi/inflect"
-	"github.com/pkg/errors"
 	"go.uber.org/zap"
 	admissionv1 "k8s.io/api/admission/v1"
 	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
+	rbacv1 "k8s.io/api/rbac/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -53,6 +53,9 @@ type Options struct {
 	// DeploymentName is the deployment name of the webhook.
 	DeploymentName string
 
+	// ClusterRoleName is the cluster role name of the webhook
+	ClusterRoleName string
+
 	// SecretName is the name of k8s secret that contains the webhook
 	// server key/cert and corresponding CA cert that signed them. The
 	// server key/cert are used to serve the webhook and the CA cert
@@ -118,7 +121,7 @@ func (ac *AdmissionController) Run(ctx context.Context) error {
 	case <-ctx.Done():
 		return server.Close()
 	case <-serverStartErrCh:
-		return errors.New("webhook server failed to start")
+		return fmt.Errorf("webhook server failed to start")
 	}
 }
 
@@ -180,29 +183,29 @@ func (ac *AdmissionController) register(
 			FailurePolicy: &failurePolicy,
 		}},
 	}
-	deployment, err := ac.Client.AppsV1().Deployments(ac.Options.Namespace).Get(ctx, ac.Options.DeploymentName, metav1.GetOptions{})
+	clusterRole, err := ac.Client.RbacV1().ClusterRoles().Get(ctx, ac.Options.ClusterRoleName, metav1.GetOptions{})
 	if err != nil {
-		return errors.Wrapf(err, "failed to fetch webhook deployment")
+		return fmt.Errorf("failed to fetch webhook cluster role, %w", err)
 	}
-	deploymentRef := metav1.NewControllerRef(deployment, appsv1.SchemeGroupVersion.WithKind("Deployment"))
-	webhook.OwnerReferences = append(webhook.OwnerReferences, *deploymentRef)
+	clusterRoleRef := metav1.NewControllerRef(clusterRole, rbacv1.SchemeGroupVersion.WithKind("ClusterRole"))
+	webhook.OwnerReferences = append(webhook.OwnerReferences, *clusterRoleRef)
 	_, err = client.Create(ctx, webhook, metav1.CreateOptions{})
 	if err != nil {
 		if !apierrors.IsAlreadyExists(err) {
-			return errors.Wrap(err, "failed to create a webhook")
+			return fmt.Errorf("failed to create a webhook, %w", err)
 		}
 		ac.Logger.Info("Webhook already exists")
 		configuredWebhook, err := client.Get(ctx, ac.Options.WebhookName, metav1.GetOptions{})
 		if err != nil {
-			return errors.Wrap(err, "failed to retrieve webhook")
+			return fmt.Errorf("failed to retrieve webhook, %w", err)
 		}
 		if !reflect.DeepEqual(configuredWebhook.Webhooks, webhook.Webhooks) {
 			ac.Logger.Info("Updating webhook")
 			// Set the ResourceVersion as required by update.
 			webhook.ObjectMeta.ResourceVersion = configuredWebhook.ObjectMeta.ResourceVersion
 			if _, err := client.Update(ctx, webhook, metav1.UpdateOptions{}); err != nil {
-				return errors.Wrap(err, "failed to update webhook")
+				return fmt.Errorf("failed to update webhook, %w", err)
 			}
 		} else {
 			ac.Logger.Info("Webhook is valid")
@@ -288,13 +291,13 @@ func (ac *AdmissionController) generateSecret(ctx context.Context) (*corev1.Secr
 	hosts := []string{}
 	hosts = append(hosts, fmt.Sprintf("%s.%s.svc.cluster.local", ac.Options.ServiceName, ac.Options.Namespace))
 	hosts = append(hosts, fmt.Sprintf("%s.%s.svc", ac.Options.ServiceName, ac.Options.Namespace))
-	serverKey, serverCert, caCert, err := commontls.CreateCerts(certOrg, hosts, time.Now().Add(10*365*24*time.Hour))
+	serverKey, serverCert, caCert, err := commontls.CreateCerts(certOrg, hosts, time.Now().Add(10*365*24*time.Hour), true, false)
 	if err != nil {
 		return nil, err
 	}
 	deployment, err := ac.Client.AppsV1().Deployments(ac.Options.Namespace).Get(ctx, ac.Options.DeploymentName, metav1.GetOptions{})
 	if err != nil {
-		return nil, errors.Wrapf(err, "Failed to fetch webhook deployment")
+		return nil, fmt.Errorf("failed to fetch webhook deployment, %w", err)
 	}
 	deploymentRef := metav1.NewControllerRef(deployment, appsv1.SchemeGroupVersion.WithKind("Deployment"))
 	secret := &corev1.Secret{
@@ -337,13 +340,13 @@ func (ac *AdmissionController) getOrGenerateKeyCertsFromSecret(ctx context.Conte
 
 	var ok bool
 	if serverKey, ok = secret.Data[secretServerKey]; !ok {
-		return nil, nil, nil, errors.New("server key missing")
+		return nil, nil, nil, fmt.Errorf("server key missing")
 	}
 	if serverCert, ok = secret.Data[secretServerCert]; !ok {
-		return nil, nil, nil, errors.New("server cert missing")
+		return nil, nil, nil, fmt.Errorf("server cert missing")
 	}
 	if caCert, ok = secret.Data[secretCACert]; !ok {
-		return nil, nil, nil, errors.New("ca cert missing")
+		return nil, nil, nil, fmt.Errorf("ca cert missing")
 	}
 	return serverKey, serverCert, caCert, nil
 }
@@ -360,7 +363,7 @@ func (ac *AdmissionController) getAPIServerExtensionCACert(ctx context.Context)
 	const caFileName = "requestheader-client-ca-file"
 	pem, ok := c.Data[caFileName]
 	if !ok {
-		return nil, errors.Errorf("cannot find %s in ConfigMap %s", caFileName, name)
+		return nil, fmt.Errorf("cannot find %s in ConfigMap %s", caFileName, name)
 	}
 	return []byte(pem), nil
 }
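Owning the `ValidatingWebhookConfiguration` through the webhook's ClusterRole rather than its Deployment keeps the owner and the ownee both cluster-scoped; a namespaced owner on a cluster-scoped object is not honored for garbage collection. A minimal sketch of building that controller reference:

```go
package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// A cluster-scoped owner for a cluster-scoped ownee, mirroring the
	// register() change above.
	clusterRole := &rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{Name: "argo-events-webhook"},
	}
	ref := metav1.NewControllerRef(clusterRole, rbacv1.SchemeGroupVersion.WithKind("ClusterRole"))
	fmt.Printf("ownerReference: kind=%s name=%s controller=%v\n", ref.Kind, ref.Name, *ref.Controller)
}
```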
panic(fmt.Errorf("failed to create test webhook, %w", err)) } }